From 1d8fb1f8588c6ad33aad690298cd98db1ce3b27a Mon Sep 17 00:00:00 2001 From: Julian Sikorski Date: Fri, 13 Sep 2024 21:53:03 +0000 Subject: [PATCH] Update odroidxu4-current to 6.6.51 --- .../odroidxu4-6.6/patch-6.6.44-45.patch | 5821 ++++++ .../odroidxu4-6.6/patch-6.6.45-46.patch | 6300 +++++++ .../odroidxu4-6.6/patch-6.6.46-47.patch | 4211 +++++ .../odroidxu4-6.6/patch-6.6.47-48.patch | 14889 ++++++++++++++++ .../odroidxu4-6.6/patch-6.6.48-49.patch | 3833 ++++ .../odroidxu4-6.6/patch-6.6.49-50.patch | 5960 +++++++ .../odroidxu4-6.6/patch-6.6.50-51.patch | 12247 +++++++++++++ 7 files changed, 53261 insertions(+) create mode 100644 patch/kernel/archive/odroidxu4-6.6/patch-6.6.44-45.patch create mode 100644 patch/kernel/archive/odroidxu4-6.6/patch-6.6.45-46.patch create mode 100644 patch/kernel/archive/odroidxu4-6.6/patch-6.6.46-47.patch create mode 100644 patch/kernel/archive/odroidxu4-6.6/patch-6.6.47-48.patch create mode 100644 patch/kernel/archive/odroidxu4-6.6/patch-6.6.48-49.patch create mode 100644 patch/kernel/archive/odroidxu4-6.6/patch-6.6.49-50.patch create mode 100644 patch/kernel/archive/odroidxu4-6.6/patch-6.6.50-51.patch diff --git a/patch/kernel/archive/odroidxu4-6.6/patch-6.6.44-45.patch b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.44-45.patch new file mode 100644 index 000000000000..95da970d54ec --- /dev/null +++ b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.44-45.patch @@ -0,0 +1,5821 @@ +diff --git a/Makefile b/Makefile +index 2e5d92ce2774d..0bd4bee2128b4 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 6 +-SUBLEVEL = 44 ++SUBLEVEL = 45 + EXTRAVERSION = + NAME = Hurr durr I'ma ninja sloth + +diff --git a/arch/arm/kernel/perf_callchain.c b/arch/arm/kernel/perf_callchain.c +index 7147edbe56c67..1d230ac9d0eb5 100644 +--- a/arch/arm/kernel/perf_callchain.c ++++ b/arch/arm/kernel/perf_callchain.c +@@ -85,8 +85,7 @@ static bool + callchain_trace(void *data, unsigned long pc) + { + struct perf_callchain_entry_ctx *entry = data; +- perf_callchain_store(entry, pc); +- return true; ++ return perf_callchain_store(entry, pc) == 0; + } + + void +diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi +index 5effd8180cc41..e5993a365870c 100644 +--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi +@@ -641,6 +641,7 @@ dwc_0: usb@8a00000 { + interrupts = ; + phys = <&qusb_phy_0>, <&usb0_ssphy>; + phy-names = "usb2-phy", "usb3-phy"; ++ snps,parkmode-disable-ss-quirk; + snps,is-utmi-l1-suspend; + snps,hird-threshold = /bits/ 8 <0x0>; + snps,dis_u2_susphy_quirk; +@@ -683,6 +684,7 @@ dwc_1: usb@8c00000 { + interrupts = ; + phys = <&qusb_phy_1>, <&usb1_ssphy>; + phy-names = "usb2-phy", "usb3-phy"; ++ snps,parkmode-disable-ss-quirk; + snps,is-utmi-l1-suspend; + snps,hird-threshold = /bits/ 8 <0x0>; + snps,dis_u2_susphy_quirk; +diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi +index 9c072ce197358..7fcc15b6946ae 100644 +--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi ++++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi +@@ -2159,7 +2159,8 @@ usb3_dwc3: usb@a800000 { + interrupts = ; + snps,dis_u2_susphy_quirk; + snps,dis_enblslpm_quirk; +- phys = <&qusb2phy>, <&usb1_ssphy>; ++ snps,parkmode-disable-ss-quirk; ++ phys = <&qusb2phy>, <&usb3phy>; + phy-names = "usb2-phy", "usb3-phy"; + snps,has-lpm-erratum; + snps,hird-threshold = /bits/ 8 <0x10>; +@@ -2168,33 +2169,26 @@ usb3_dwc3: usb@a800000 { + + usb3phy: phy@c010000 { + 
compatible = "qcom,msm8998-qmp-usb3-phy"; +- reg = <0x0c010000 0x18c>; +- status = "disabled"; +- #address-cells = <1>; +- #size-cells = <1>; +- ranges; ++ reg = <0x0c010000 0x1000>; + + clocks = <&gcc GCC_USB3_PHY_AUX_CLK>, ++ <&gcc GCC_USB3_CLKREF_CLK>, + <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>, +- <&gcc GCC_USB3_CLKREF_CLK>; +- clock-names = "aux", "cfg_ahb", "ref"; ++ <&gcc GCC_USB3_PHY_PIPE_CLK>; ++ clock-names = "aux", ++ "ref", ++ "cfg_ahb", ++ "pipe"; ++ clock-output-names = "usb3_phy_pipe_clk_src"; ++ #clock-cells = <0>; ++ #phy-cells = <0>; + + resets = <&gcc GCC_USB3_PHY_BCR>, + <&gcc GCC_USB3PHY_PHY_BCR>; +- reset-names = "phy", "common"; ++ reset-names = "phy", ++ "phy_phy"; + +- usb1_ssphy: phy@c010200 { +- reg = <0xc010200 0x128>, +- <0xc010400 0x200>, +- <0xc010c00 0x20c>, +- <0xc010600 0x128>, +- <0xc010800 0x200>; +- #phy-cells = <0>; +- #clock-cells = <0>; +- clocks = <&gcc GCC_USB3_PHY_PIPE_CLK>; +- clock-names = "pipe0"; +- clock-output-names = "usb3_phy_pipe_clk_src"; +- }; ++ status = "disabled"; + }; + + qusb2phy: phy@c012000 { +diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi +index f7c528ecb224b..68b1c017a9fd5 100644 +--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi ++++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -2795,49 +2796,28 @@ usb_1_hsphy: phy@88e3000 { + nvmem-cells = <&qusb2p_hstx_trim>; + }; + +- usb_1_qmpphy: phy-wrapper@88e9000 { ++ usb_1_qmpphy: phy@88e8000 { + compatible = "qcom,sc7180-qmp-usb3-dp-phy"; +- reg = <0 0x088e9000 0 0x18c>, +- <0 0x088e8000 0 0x3c>, +- <0 0x088ea000 0 0x18c>; ++ reg = <0 0x088e8000 0 0x3000>; + status = "disabled"; +- #address-cells = <2>; +- #size-cells = <2>; +- ranges; + + clocks = <&gcc GCC_USB3_PRIM_PHY_AUX_CLK>, +- <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>, + <&gcc GCC_USB3_PRIM_CLKREF_CLK>, +- <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>; +- clock-names = "aux", "cfg_ahb", "ref", "com_aux"; ++ <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>, ++ <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>, ++ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>; ++ clock-names = "aux", ++ "ref", ++ "com_aux", ++ "usb3_pipe", ++ "cfg_ahb"; + + resets = <&gcc GCC_USB3_PHY_PRIM_BCR>, + <&gcc GCC_USB3_DP_PHY_PRIM_BCR>; + reset-names = "phy", "common"; + +- usb_1_ssphy: usb3-phy@88e9200 { +- reg = <0 0x088e9200 0 0x128>, +- <0 0x088e9400 0 0x200>, +- <0 0x088e9c00 0 0x218>, +- <0 0x088e9600 0 0x128>, +- <0 0x088e9800 0 0x200>, +- <0 0x088e9a00 0 0x18>; +- #clock-cells = <0>; +- #phy-cells = <0>; +- clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>; +- clock-names = "pipe0"; +- clock-output-names = "usb3_phy_pipe_clk_src"; +- }; +- +- dp_phy: dp-phy@88ea200 { +- reg = <0 0x088ea200 0 0x200>, +- <0 0x088ea400 0 0x200>, +- <0 0x088eaa00 0 0x200>, +- <0 0x088ea600 0 0x200>, +- <0 0x088ea800 0 0x200>; +- #clock-cells = <1>; +- #phy-cells = <0>; +- }; ++ #clock-cells = <1>; ++ #phy-cells = <1>; + }; + + pmu@90b6300 { +@@ -3001,7 +2981,8 @@ usb_1_dwc3: usb@a600000 { + iommus = <&apps_smmu 0x540 0>; + snps,dis_u2_susphy_quirk; + snps,dis_enblslpm_quirk; +- phys = <&usb_1_hsphy>, <&usb_1_ssphy>; ++ snps,parkmode-disable-ss-quirk; ++ phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>; + phy-names = "usb2-phy", "usb3-phy"; + maximum-speed = "super-speed"; + }; +@@ -3307,8 +3288,9 @@ mdss_dp: displayport-controller@ae90000 { + "ctrl_link_iface", "stream_pixel"; + assigned-clocks = <&dispcc DISP_CC_MDSS_DP_LINK_CLK_SRC>, + <&dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>; +- 
assigned-clock-parents = <&dp_phy 0>, <&dp_phy 1>; +- phys = <&dp_phy>; ++ assigned-clock-parents = <&usb_1_qmpphy QMP_USB43DP_DP_LINK_CLK>, ++ <&usb_1_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>; ++ phys = <&usb_1_qmpphy QMP_USB43DP_DP_PHY>; + phy-names = "dp"; + + operating-points-v2 = <&dp_opp_table>; +@@ -3365,8 +3347,8 @@ dispcc: clock-controller@af00000 { + <&gcc GCC_DISP_GPLL0_CLK_SRC>, + <&mdss_dsi0_phy 0>, + <&mdss_dsi0_phy 1>, +- <&dp_phy 0>, +- <&dp_phy 1>; ++ <&usb_1_qmpphy QMP_USB43DP_DP_LINK_CLK>, ++ <&usb_1_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>; + clock-names = "bi_tcxo", + "gcc_disp_gpll0_clk_src", + "dsi0_phy_pll_out_byteclk", +diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi +index b75de7caaa7e5..149c7962f2cbb 100644 +--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi ++++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi +@@ -18,6 +18,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -858,7 +859,7 @@ gcc: clock-controller@100000 { + <&rpmhcc RPMH_CXO_CLK_A>, <&sleep_clk>, + <0>, <&pcie1_lane>, + <0>, <0>, <0>, +- <&usb_1_ssphy>; ++ <&usb_1_qmpphy QMP_USB43DP_USB3_PIPE_CLK>; + clock-names = "bi_tcxo", "bi_tcxo_ao", "sleep_clk", + "pcie_0_pipe_clk", "pcie_1_pipe_clk", + "ufs_phy_rx_symbol_0_clk", "ufs_phy_rx_symbol_1_clk", +@@ -3351,49 +3352,26 @@ usb_2_hsphy: phy@88e4000 { + resets = <&gcc GCC_QUSB2PHY_SEC_BCR>; + }; + +- usb_1_qmpphy: phy-wrapper@88e9000 { +- compatible = "qcom,sc7280-qmp-usb3-dp-phy", +- "qcom,sm8250-qmp-usb3-dp-phy"; +- reg = <0 0x088e9000 0 0x200>, +- <0 0x088e8000 0 0x40>, +- <0 0x088ea000 0 0x200>; ++ usb_1_qmpphy: phy@88e8000 { ++ compatible = "qcom,sc7280-qmp-usb3-dp-phy"; ++ reg = <0 0x088e8000 0 0x3000>; + status = "disabled"; +- #address-cells = <2>; +- #size-cells = <2>; +- ranges; + + clocks = <&gcc GCC_USB3_PRIM_PHY_AUX_CLK>, + <&rpmhcc RPMH_CXO_CLK>, +- <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>; +- clock-names = "aux", "ref_clk_src", "com_aux"; ++ <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>, ++ <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>; ++ clock-names = "aux", ++ "ref", ++ "com_aux", ++ "usb3_pipe"; + + resets = <&gcc GCC_USB3_DP_PHY_PRIM_BCR>, + <&gcc GCC_USB3_PHY_PRIM_BCR>; + reset-names = "phy", "common"; + +- usb_1_ssphy: usb3-phy@88e9200 { +- reg = <0 0x088e9200 0 0x200>, +- <0 0x088e9400 0 0x200>, +- <0 0x088e9c00 0 0x400>, +- <0 0x088e9600 0 0x200>, +- <0 0x088e9800 0 0x200>, +- <0 0x088e9a00 0 0x100>; +- #clock-cells = <0>; +- #phy-cells = <0>; +- clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>; +- clock-names = "pipe0"; +- clock-output-names = "usb3_phy_pipe_clk_src"; +- }; +- +- dp_phy: dp-phy@88ea200 { +- reg = <0 0x088ea200 0 0x200>, +- <0 0x088ea400 0 0x200>, +- <0 0x088eaa00 0 0x200>, +- <0 0x088ea600 0 0x200>, +- <0 0x088ea800 0 0x200>; +- #phy-cells = <0>; +- #clock-cells = <1>; +- }; ++ #clock-cells = <1>; ++ #phy-cells = <1>; + }; + + usb_2: usb@8cf8800 { +@@ -3702,7 +3680,8 @@ usb_1_dwc3: usb@a600000 { + iommus = <&apps_smmu 0xe0 0x0>; + snps,dis_u2_susphy_quirk; + snps,dis_enblslpm_quirk; +- phys = <&usb_1_hsphy>, <&usb_1_ssphy>; ++ snps,parkmode-disable-ss-quirk; ++ phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>; + phy-names = "usb2-phy", "usb3-phy"; + maximum-speed = "super-speed"; + }; +@@ -3807,8 +3786,8 @@ dispcc: clock-controller@af00000 { + <&gcc GCC_DISP_GPLL0_CLK_SRC>, + <&mdss_dsi_phy 0>, + <&mdss_dsi_phy 1>, +- <&dp_phy 0>, +- <&dp_phy 1>, ++ <&usb_1_qmpphy QMP_USB43DP_DP_LINK_CLK>, ++ <&usb_1_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>, + <&mdss_edp_phy 0>, + <&mdss_edp_phy 1>; + 
clock-names = "bi_tcxo", +@@ -4144,8 +4123,9 @@ mdss_dp: displayport-controller@ae90000 { + "stream_pixel"; + assigned-clocks = <&dispcc DISP_CC_MDSS_DP_LINK_CLK_SRC>, + <&dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>; +- assigned-clock-parents = <&dp_phy 0>, <&dp_phy 1>; +- phys = <&dp_phy>; ++ assigned-clock-parents = <&usb_1_qmpphy QMP_USB43DP_DP_LINK_CLK>, ++ <&usb_1_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>; ++ phys = <&usb_1_qmpphy QMP_USB43DP_DP_PHY>; + phy-names = "dp"; + + operating-points-v2 = <&dp_opp_table>; +diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi +index 9d9b378c07e14..dcdc8a0cd1819 100644 +--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi ++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi +@@ -18,6 +18,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -3983,80 +3984,54 @@ usb_2_hsphy: phy@88e3000 { + nvmem-cells = <&qusb2s_hstx_trim>; + }; + +- usb_1_qmpphy: phy@88e9000 { ++ usb_1_qmpphy: phy@88e8000 { + compatible = "qcom,sdm845-qmp-usb3-dp-phy"; +- reg = <0 0x088e9000 0 0x18c>, +- <0 0x088e8000 0 0x38>, +- <0 0x088ea000 0 0x40>; ++ reg = <0 0x088e8000 0 0x3000>; + status = "disabled"; +- #address-cells = <2>; +- #size-cells = <2>; +- ranges; + + clocks = <&gcc GCC_USB3_PRIM_PHY_AUX_CLK>, +- <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>, + <&gcc GCC_USB3_PRIM_CLKREF_CLK>, +- <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>; +- clock-names = "aux", "cfg_ahb", "ref", "com_aux"; ++ <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>, ++ <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>, ++ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>; ++ clock-names = "aux", ++ "ref", ++ "com_aux", ++ "usb3_pipe", ++ "cfg_ahb"; + + resets = <&gcc GCC_USB3_PHY_PRIM_BCR>, + <&gcc GCC_USB3_DP_PHY_PRIM_BCR>; + reset-names = "phy", "common"; + +- usb_1_ssphy: usb3-phy@88e9200 { +- reg = <0 0x088e9200 0 0x128>, +- <0 0x088e9400 0 0x200>, +- <0 0x088e9c00 0 0x218>, +- <0 0x088e9600 0 0x128>, +- <0 0x088e9800 0 0x200>, +- <0 0x088e9a00 0 0x100>; +- #clock-cells = <0>; +- #phy-cells = <0>; +- clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>; +- clock-names = "pipe0"; +- clock-output-names = "usb3_phy_pipe_clk_src"; +- }; +- +- dp_phy: dp-phy@88ea200 { +- reg = <0 0x088ea200 0 0x200>, +- <0 0x088ea400 0 0x200>, +- <0 0x088eaa00 0 0x200>, +- <0 0x088ea600 0 0x200>, +- <0 0x088ea800 0 0x200>; +- #clock-cells = <1>; +- #phy-cells = <0>; +- }; ++ #clock-cells = <1>; ++ #phy-cells = <1>; + }; + + usb_2_qmpphy: phy@88eb000 { + compatible = "qcom,sdm845-qmp-usb3-uni-phy"; +- reg = <0 0x088eb000 0 0x18c>; +- status = "disabled"; +- #address-cells = <2>; +- #size-cells = <2>; +- ranges; ++ reg = <0 0x088eb000 0 0x1000>; + + clocks = <&gcc GCC_USB3_SEC_PHY_AUX_CLK>, + <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>, + <&gcc GCC_USB3_SEC_CLKREF_CLK>, +- <&gcc GCC_USB3_SEC_PHY_COM_AUX_CLK>; +- clock-names = "aux", "cfg_ahb", "ref", "com_aux"; ++ <&gcc GCC_USB3_SEC_PHY_COM_AUX_CLK>, ++ <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>; ++ clock-names = "aux", ++ "cfg_ahb", ++ "ref", ++ "com_aux", ++ "pipe"; ++ clock-output-names = "usb3_uni_phy_pipe_clk_src"; ++ #clock-cells = <0>; ++ #phy-cells = <0>; + +- resets = <&gcc GCC_USB3PHY_PHY_SEC_BCR>, +- <&gcc GCC_USB3_PHY_SEC_BCR>; +- reset-names = "phy", "common"; ++ resets = <&gcc GCC_USB3_PHY_SEC_BCR>, ++ <&gcc GCC_USB3PHY_PHY_SEC_BCR>; ++ reset-names = "phy", ++ "phy_phy"; + +- usb_2_ssphy: phy@88eb200 { +- reg = <0 0x088eb200 0 0x128>, +- <0 0x088eb400 0 0x1fc>, +- <0 0x088eb800 0 0x218>, +- <0 0x088eb600 0 0x70>; +- #clock-cells = <0>; +- #phy-cells = <0>; +- clocks = <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>; +- 
clock-names = "pipe0"; +- clock-output-names = "usb3_uni_phy_pipe_clk_src"; +- }; ++ status = "disabled"; + }; + + usb_1: usb@a6f8800 { +@@ -4105,7 +4080,8 @@ usb_1_dwc3: usb@a600000 { + iommus = <&apps_smmu 0x740 0>; + snps,dis_u2_susphy_quirk; + snps,dis_enblslpm_quirk; +- phys = <&usb_1_hsphy>, <&usb_1_ssphy>; ++ snps,parkmode-disable-ss-quirk; ++ phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>; + phy-names = "usb2-phy", "usb3-phy"; + }; + }; +@@ -4156,7 +4132,8 @@ usb_2_dwc3: usb@a800000 { + iommus = <&apps_smmu 0x760 0>; + snps,dis_u2_susphy_quirk; + snps,dis_enblslpm_quirk; +- phys = <&usb_2_hsphy>, <&usb_2_ssphy>; ++ snps,parkmode-disable-ss-quirk; ++ phys = <&usb_2_hsphy>, <&usb_2_qmpphy>; + phy-names = "usb2-phy", "usb3-phy"; + }; + }; +@@ -4573,8 +4550,9 @@ mdss_dp: displayport-controller@ae90000 { + "ctrl_link_iface", "stream_pixel"; + assigned-clocks = <&dispcc DISP_CC_MDSS_DP_LINK_CLK_SRC>, + <&dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>; +- assigned-clock-parents = <&dp_phy 0>, <&dp_phy 1>; +- phys = <&dp_phy>; ++ assigned-clock-parents = <&usb_1_qmpphy QMP_USB43DP_DP_LINK_CLK>, ++ <&usb_1_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>; ++ phys = <&usb_1_qmpphy QMP_USB43DP_DP_PHY>; + phy-names = "dp"; + + operating-points-v2 = <&dp_opp_table>; +@@ -4912,8 +4890,8 @@ dispcc: clock-controller@af00000 { + <&mdss_dsi0_phy 1>, + <&mdss_dsi1_phy 0>, + <&mdss_dsi1_phy 1>, +- <&dp_phy 0>, +- <&dp_phy 1>; ++ <&usb_1_qmpphy QMP_USB43DP_DP_LINK_CLK>, ++ <&usb_1_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>; + clock-names = "bi_tcxo", + "gcc_disp_gpll0_clk_src", + "gcc_disp_gpll0_div_clk_src", +diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h +index 6aafbb7899916..4b99159150829 100644 +--- a/arch/arm64/include/asm/jump_label.h ++++ b/arch/arm64/include/asm/jump_label.h +@@ -13,6 +13,7 @@ + #include + #include + ++#define HAVE_JUMP_LABEL_BATCH + #define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE + + static __always_inline bool arch_static_branch(struct static_key * const key, +diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c +index faf88ec9c48e8..f63ea915d6ad2 100644 +--- a/arch/arm64/kernel/jump_label.c ++++ b/arch/arm64/kernel/jump_label.c +@@ -7,11 +7,12 @@ + */ + #include + #include ++#include + #include + #include + +-void arch_jump_label_transform(struct jump_entry *entry, +- enum jump_label_type type) ++bool arch_jump_label_transform_queue(struct jump_entry *entry, ++ enum jump_label_type type) + { + void *addr = (void *)jump_entry_code(entry); + u32 insn; +@@ -25,4 +26,10 @@ void arch_jump_label_transform(struct jump_entry *entry, + } + + aarch64_insn_patch_text_nosync(addr, insn); ++ return true; ++} ++ ++void arch_jump_label_transform_apply(void) ++{ ++ kick_all_cpus_sync(); + } +diff --git a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi +index c0be84a6e81fd..cc7747c5f21f3 100644 +--- a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi ++++ b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi +@@ -99,8 +99,8 @@ liointc1: interrupt-controller@1fe11440 { + rtc0: rtc@1fe07800 { + compatible = "loongson,ls2k1000-rtc"; + reg = <0 0x1fe07800 0 0x78>; +- interrupt-parent = <&liointc0>; +- interrupts = <60 IRQ_TYPE_LEVEL_LOW>; ++ interrupt-parent = <&liointc1>; ++ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>; + }; + + uart0: serial@1fe00000 { +@@ -108,7 +108,7 @@ uart0: serial@1fe00000 { + reg = <0 0x1fe00000 0 0x8>; + clock-frequency = <125000000>; + interrupt-parent = <&liointc0>; +- 
interrupts = <0 IRQ_TYPE_LEVEL_LOW>; ++ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>; + no-loopback-test; + }; + +@@ -117,7 +117,6 @@ pci@1a000000 { + device_type = "pci"; + #address-cells = <3>; + #size-cells = <2>; +- #interrupt-cells = <2>; + + reg = <0 0x1a000000 0 0x02000000>, + <0xfe 0x00000000 0 0x20000000>; +@@ -132,8 +131,8 @@ gmac@3,0 { + "pciclass0c03"; + + reg = <0x1800 0x0 0x0 0x0 0x0>; +- interrupts = <12 IRQ_TYPE_LEVEL_LOW>, +- <13 IRQ_TYPE_LEVEL_LOW>; ++ interrupts = <12 IRQ_TYPE_LEVEL_HIGH>, ++ <13 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "macirq", "eth_lpi"; + interrupt-parent = <&liointc0>; + phy-mode = "rgmii-id"; +@@ -156,8 +155,8 @@ gmac@3,1 { + "loongson, pci-gmac"; + + reg = <0x1900 0x0 0x0 0x0 0x0>; +- interrupts = <14 IRQ_TYPE_LEVEL_LOW>, +- <15 IRQ_TYPE_LEVEL_LOW>; ++ interrupts = <14 IRQ_TYPE_LEVEL_HIGH>, ++ <15 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "macirq", "eth_lpi"; + interrupt-parent = <&liointc0>; + phy-mode = "rgmii-id"; +@@ -179,7 +178,7 @@ ehci@4,1 { + "pciclass0c03"; + + reg = <0x2100 0x0 0x0 0x0 0x0>; +- interrupts = <18 IRQ_TYPE_LEVEL_LOW>; ++ interrupts = <18 IRQ_TYPE_LEVEL_HIGH>; + interrupt-parent = <&liointc1>; + }; + +@@ -190,7 +189,7 @@ ohci@4,2 { + "pciclass0c03"; + + reg = <0x2200 0x0 0x0 0x0 0x0>; +- interrupts = <19 IRQ_TYPE_LEVEL_LOW>; ++ interrupts = <19 IRQ_TYPE_LEVEL_HIGH>; + interrupt-parent = <&liointc1>; + }; + +@@ -201,97 +200,121 @@ sata@8,0 { + "pciclass0106"; + + reg = <0x4000 0x0 0x0 0x0 0x0>; +- interrupts = <19 IRQ_TYPE_LEVEL_LOW>; ++ interrupts = <19 IRQ_TYPE_LEVEL_HIGH>; + interrupt-parent = <&liointc0>; + }; + +- pci_bridge@9,0 { ++ pcie@9,0 { + compatible = "pci0014,7a19.0", + "pci0014,7a19", + "pciclass060400", + "pciclass0604"; + + reg = <0x4800 0x0 0x0 0x0 0x0>; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; + #interrupt-cells = <1>; +- interrupts = <0 IRQ_TYPE_LEVEL_LOW>; ++ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>; + interrupt-parent = <&liointc1>; + interrupt-map-mask = <0 0 0 0>; +- interrupt-map = <0 0 0 0 &liointc1 0 IRQ_TYPE_LEVEL_LOW>; ++ interrupt-map = <0 0 0 0 &liointc1 0 IRQ_TYPE_LEVEL_HIGH>; ++ ranges; + external-facing; + }; + +- pci_bridge@a,0 { ++ pcie@a,0 { + compatible = "pci0014,7a09.0", + "pci0014,7a09", + "pciclass060400", + "pciclass0604"; + + reg = <0x5000 0x0 0x0 0x0 0x0>; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; + #interrupt-cells = <1>; +- interrupts = <1 IRQ_TYPE_LEVEL_LOW>; ++ interrupts = <1 IRQ_TYPE_LEVEL_HIGH>; + interrupt-parent = <&liointc1>; + interrupt-map-mask = <0 0 0 0>; +- interrupt-map = <0 0 0 0 &liointc1 1 IRQ_TYPE_LEVEL_LOW>; ++ interrupt-map = <0 0 0 0 &liointc1 1 IRQ_TYPE_LEVEL_HIGH>; ++ ranges; + external-facing; + }; + +- pci_bridge@b,0 { ++ pcie@b,0 { + compatible = "pci0014,7a09.0", + "pci0014,7a09", + "pciclass060400", + "pciclass0604"; + + reg = <0x5800 0x0 0x0 0x0 0x0>; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; + #interrupt-cells = <1>; +- interrupts = <2 IRQ_TYPE_LEVEL_LOW>; ++ interrupts = <2 IRQ_TYPE_LEVEL_HIGH>; + interrupt-parent = <&liointc1>; + interrupt-map-mask = <0 0 0 0>; +- interrupt-map = <0 0 0 0 &liointc1 2 IRQ_TYPE_LEVEL_LOW>; ++ interrupt-map = <0 0 0 0 &liointc1 2 IRQ_TYPE_LEVEL_HIGH>; ++ ranges; + external-facing; + }; + +- pci_bridge@c,0 { ++ pcie@c,0 { + compatible = "pci0014,7a09.0", + "pci0014,7a09", + "pciclass060400", + "pciclass0604"; + + reg = <0x6000 0x0 0x0 0x0 0x0>; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; + #interrupt-cells = <1>; +- 
interrupts = <3 IRQ_TYPE_LEVEL_LOW>; ++ interrupts = <3 IRQ_TYPE_LEVEL_HIGH>; + interrupt-parent = <&liointc1>; + interrupt-map-mask = <0 0 0 0>; +- interrupt-map = <0 0 0 0 &liointc1 3 IRQ_TYPE_LEVEL_LOW>; ++ interrupt-map = <0 0 0 0 &liointc1 3 IRQ_TYPE_LEVEL_HIGH>; ++ ranges; + external-facing; + }; + +- pci_bridge@d,0 { ++ pcie@d,0 { + compatible = "pci0014,7a19.0", + "pci0014,7a19", + "pciclass060400", + "pciclass0604"; + + reg = <0x6800 0x0 0x0 0x0 0x0>; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; + #interrupt-cells = <1>; +- interrupts = <4 IRQ_TYPE_LEVEL_LOW>; ++ interrupts = <4 IRQ_TYPE_LEVEL_HIGH>; + interrupt-parent = <&liointc1>; + interrupt-map-mask = <0 0 0 0>; +- interrupt-map = <0 0 0 0 &liointc1 4 IRQ_TYPE_LEVEL_LOW>; ++ interrupt-map = <0 0 0 0 &liointc1 4 IRQ_TYPE_LEVEL_HIGH>; ++ ranges; + external-facing; + }; + +- pci_bridge@e,0 { ++ pcie@e,0 { + compatible = "pci0014,7a09.0", + "pci0014,7a09", + "pciclass060400", + "pciclass0604"; + + reg = <0x7000 0x0 0x0 0x0 0x0>; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; + #interrupt-cells = <1>; +- interrupts = <5 IRQ_TYPE_LEVEL_LOW>; ++ interrupts = <5 IRQ_TYPE_LEVEL_HIGH>; + interrupt-parent = <&liointc1>; + interrupt-map-mask = <0 0 0 0>; +- interrupt-map = <0 0 0 0 &liointc1 5 IRQ_TYPE_LEVEL_LOW>; ++ interrupt-map = <0 0 0 0 &liointc1 5 IRQ_TYPE_LEVEL_HIGH>; ++ ranges; + external-facing; + }; + +diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c +index 5348d842c7453..e867fe465164e 100644 +--- a/arch/riscv/kernel/traps_misaligned.c ++++ b/arch/riscv/kernel/traps_misaligned.c +@@ -151,51 +151,19 @@ + #define PRECISION_S 0 + #define PRECISION_D 1 + +-#define DECLARE_UNPRIVILEGED_LOAD_FUNCTION(type, insn) \ +-static inline type load_##type(const type *addr) \ +-{ \ +- type val; \ +- asm (#insn " %0, %1" \ +- : "=&r" (val) : "m" (*addr)); \ +- return val; \ +-} ++static inline u8 load_u8(const u8 *addr) ++{ ++ u8 val; + +-#define DECLARE_UNPRIVILEGED_STORE_FUNCTION(type, insn) \ +-static inline void store_##type(type *addr, type val) \ +-{ \ +- asm volatile (#insn " %0, %1\n" \ +- : : "r" (val), "m" (*addr)); \ +-} ++ asm volatile("lbu %0, %1" : "=&r" (val) : "m" (*addr)); + +-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u8, lbu) +-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u16, lhu) +-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s8, lb) +-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s16, lh) +-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s32, lw) +-DECLARE_UNPRIVILEGED_STORE_FUNCTION(u8, sb) +-DECLARE_UNPRIVILEGED_STORE_FUNCTION(u16, sh) +-DECLARE_UNPRIVILEGED_STORE_FUNCTION(u32, sw) +-#if defined(CONFIG_64BIT) +-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u32, lwu) +-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u64, ld) +-DECLARE_UNPRIVILEGED_STORE_FUNCTION(u64, sd) +-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(ulong, ld) +-#else +-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u32, lw) +-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(ulong, lw) +- +-static inline u64 load_u64(const u64 *addr) +-{ +- return load_u32((u32 *)addr) +- + ((u64)load_u32((u32 *)addr + 1) << 32); ++ return val; + } + +-static inline void store_u64(u64 *addr, u64 val) ++static inline void store_u8(u8 *addr, u8 val) + { +- store_u32((u32 *)addr, val); +- store_u32((u32 *)addr + 1, val >> 32); ++ asm volatile ("sb %0, %1\n" : : "r" (val), "m" (*addr)); + } +-#endif + + static inline ulong get_insn(ulong mepc) + { +diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c +index 90d4ba36d1d06..655b2b1bb529f 100644 +--- a/arch/riscv/mm/fault.c ++++ 
b/arch/riscv/mm/fault.c +@@ -61,26 +61,27 @@ static inline void no_context(struct pt_regs *regs, unsigned long addr) + + static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault) + { ++ if (!user_mode(regs)) { ++ no_context(regs, addr); ++ return; ++ } ++ + if (fault & VM_FAULT_OOM) { + /* + * We ran out of memory, call the OOM killer, and return the userspace + * (which will retry the fault, or kill us if we got oom-killed). + */ +- if (!user_mode(regs)) { +- no_context(regs, addr); +- return; +- } + pagefault_out_of_memory(); + return; + } else if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) { + /* Kernel mode? Handle exceptions or die */ +- if (!user_mode(regs)) { +- no_context(regs, addr); +- return; +- } + do_trap(regs, SIGBUS, BUS_ADRERR, addr); + return; ++ } else if (fault & VM_FAULT_SIGSEGV) { ++ do_trap(regs, SIGSEGV, SEGV_MAPERR, addr); ++ return; + } ++ + BUG(); + } + +diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c +index 8adcb9419ad50..9b10e9655df8c 100644 +--- a/arch/riscv/mm/init.c ++++ b/arch/riscv/mm/init.c +@@ -217,8 +217,6 @@ static void __init setup_bootmem(void) + */ + memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start); + +- phys_ram_end = memblock_end_of_DRAM(); +- + /* + * Make sure we align the start of the memory on a PMD boundary so that + * at worst, we map the linear mapping with PMD mappings. +@@ -233,6 +231,16 @@ static void __init setup_bootmem(void) + if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU)) + kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base; + ++ /* ++ * The size of the linear page mapping may restrict the amount of ++ * usable RAM. ++ */ ++ if (IS_ENABLED(CONFIG_64BIT)) { ++ max_mapped_addr = __pa(PAGE_OFFSET) + KERN_VIRT_SIZE; ++ memblock_cap_memory_range(phys_ram_base, ++ max_mapped_addr - phys_ram_base); ++ } ++ + /* + * Reserve physical address space that would be mapped to virtual + * addresses greater than (void *)(-PAGE_SIZE) because: +@@ -249,6 +257,7 @@ static void __init setup_bootmem(void) + memblock_reserve(max_mapped_addr, (phys_addr_t)-max_mapped_addr); + } + ++ phys_ram_end = memblock_end_of_DRAM(); + min_low_pfn = PFN_UP(phys_ram_base); + max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end); + high_memory = (void *)(__va(PFN_PHYS(max_low_pfn))); +@@ -1269,8 +1278,6 @@ static void __init create_linear_mapping_page_table(void) + if (start <= __pa(PAGE_OFFSET) && + __pa(PAGE_OFFSET) < end) + start = __pa(PAGE_OFFSET); +- if (end >= __pa(PAGE_OFFSET) + memory_limit) +- end = __pa(PAGE_OFFSET) + memory_limit; + + create_linear_mapping_range(start, end, 0); + } +diff --git a/arch/x86/include/asm/posted_intr.h b/arch/x86/include/asm/posted_intr.h +new file mode 100644 +index 0000000000000..f0324c56f7af5 +--- /dev/null ++++ b/arch/x86/include/asm/posted_intr.h +@@ -0,0 +1,88 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef _X86_POSTED_INTR_H ++#define _X86_POSTED_INTR_H ++ ++#define POSTED_INTR_ON 0 ++#define POSTED_INTR_SN 1 ++ ++#define PID_TABLE_ENTRY_VALID 1 ++ ++/* Posted-Interrupt Descriptor */ ++struct pi_desc { ++ u32 pir[8]; /* Posted interrupt requested */ ++ union { ++ struct { ++ /* bit 256 - Outstanding Notification */ ++ u16 on : 1, ++ /* bit 257 - Suppress Notification */ ++ sn : 1, ++ /* bit 271:258 - Reserved */ ++ rsvd_1 : 14; ++ /* bit 279:272 - Notification Vector */ ++ u8 nv; ++ /* bit 287:280 - Reserved */ ++ u8 rsvd_2; ++ /* bit 319:288 - Notification Destination */ ++ u32 ndst; ++ }; ++ u64 control; ++ }; ++ u32 rsvd[6]; 
++} __aligned(64); ++ ++static inline bool pi_test_and_set_on(struct pi_desc *pi_desc) ++{ ++ return test_and_set_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control); ++} ++ ++static inline bool pi_test_and_clear_on(struct pi_desc *pi_desc) ++{ ++ return test_and_clear_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control); ++} ++ ++static inline bool pi_test_and_clear_sn(struct pi_desc *pi_desc) ++{ ++ return test_and_clear_bit(POSTED_INTR_SN, (unsigned long *)&pi_desc->control); ++} ++ ++static inline bool pi_test_and_set_pir(int vector, struct pi_desc *pi_desc) ++{ ++ return test_and_set_bit(vector, (unsigned long *)pi_desc->pir); ++} ++ ++static inline bool pi_is_pir_empty(struct pi_desc *pi_desc) ++{ ++ return bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS); ++} ++ ++static inline void pi_set_sn(struct pi_desc *pi_desc) ++{ ++ set_bit(POSTED_INTR_SN, (unsigned long *)&pi_desc->control); ++} ++ ++static inline void pi_set_on(struct pi_desc *pi_desc) ++{ ++ set_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control); ++} ++ ++static inline void pi_clear_on(struct pi_desc *pi_desc) ++{ ++ clear_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control); ++} ++ ++static inline void pi_clear_sn(struct pi_desc *pi_desc) ++{ ++ clear_bit(POSTED_INTR_SN, (unsigned long *)&pi_desc->control); ++} ++ ++static inline bool pi_test_on(struct pi_desc *pi_desc) ++{ ++ return test_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control); ++} ++ ++static inline bool pi_test_sn(struct pi_desc *pi_desc) ++{ ++ return test_bit(POSTED_INTR_SN, (unsigned long *)&pi_desc->control); ++} ++ ++#endif /* _X86_POSTED_INTR_H */ +diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile +index 80e3fe184d17e..a99ffc3f3a3fd 100644 +--- a/arch/x86/kvm/Makefile ++++ b/arch/x86/kvm/Makefile +@@ -26,6 +26,10 @@ kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \ + vmx/hyperv.o vmx/nested.o vmx/posted_intr.o + kvm-intel-$(CONFIG_X86_SGX_KVM) += vmx/sgx.o + ++ifdef CONFIG_HYPERV ++kvm-intel-y += vmx/vmx_onhyperv.o ++endif ++ + kvm-amd-y += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o \ + svm/sev.o svm/hyperv.o + +diff --git a/arch/x86/kvm/vmx/hyperv.c b/arch/x86/kvm/vmx/hyperv.c +index 313b8bb5b8a7c..de13dc14fe1d2 100644 +--- a/arch/x86/kvm/vmx/hyperv.c ++++ b/arch/x86/kvm/vmx/hyperv.c +@@ -13,111 +13,6 @@ + + #define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK + +-/* +- * Enlightened VMCSv1 doesn't support these: +- * +- * POSTED_INTR_NV = 0x00000002, +- * GUEST_INTR_STATUS = 0x00000810, +- * APIC_ACCESS_ADDR = 0x00002014, +- * POSTED_INTR_DESC_ADDR = 0x00002016, +- * EOI_EXIT_BITMAP0 = 0x0000201c, +- * EOI_EXIT_BITMAP1 = 0x0000201e, +- * EOI_EXIT_BITMAP2 = 0x00002020, +- * EOI_EXIT_BITMAP3 = 0x00002022, +- * GUEST_PML_INDEX = 0x00000812, +- * PML_ADDRESS = 0x0000200e, +- * VM_FUNCTION_CONTROL = 0x00002018, +- * EPTP_LIST_ADDRESS = 0x00002024, +- * VMREAD_BITMAP = 0x00002026, +- * VMWRITE_BITMAP = 0x00002028, +- * +- * TSC_MULTIPLIER = 0x00002032, +- * PLE_GAP = 0x00004020, +- * PLE_WINDOW = 0x00004022, +- * VMX_PREEMPTION_TIMER_VALUE = 0x0000482E, +- * +- * Currently unsupported in KVM: +- * GUEST_IA32_RTIT_CTL = 0x00002814, +- */ +-#define EVMCS1_SUPPORTED_PINCTRL \ +- (PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | \ +- PIN_BASED_EXT_INTR_MASK | \ +- PIN_BASED_NMI_EXITING | \ +- PIN_BASED_VIRTUAL_NMIS) +- +-#define EVMCS1_SUPPORTED_EXEC_CTRL \ +- (CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR | \ +- CPU_BASED_HLT_EXITING | \ +- CPU_BASED_CR3_LOAD_EXITING | \ +- CPU_BASED_CR3_STORE_EXITING | \ +- 
CPU_BASED_UNCOND_IO_EXITING | \ +- CPU_BASED_MOV_DR_EXITING | \ +- CPU_BASED_USE_TSC_OFFSETTING | \ +- CPU_BASED_MWAIT_EXITING | \ +- CPU_BASED_MONITOR_EXITING | \ +- CPU_BASED_INVLPG_EXITING | \ +- CPU_BASED_RDPMC_EXITING | \ +- CPU_BASED_INTR_WINDOW_EXITING | \ +- CPU_BASED_CR8_LOAD_EXITING | \ +- CPU_BASED_CR8_STORE_EXITING | \ +- CPU_BASED_RDTSC_EXITING | \ +- CPU_BASED_TPR_SHADOW | \ +- CPU_BASED_USE_IO_BITMAPS | \ +- CPU_BASED_MONITOR_TRAP_FLAG | \ +- CPU_BASED_USE_MSR_BITMAPS | \ +- CPU_BASED_NMI_WINDOW_EXITING | \ +- CPU_BASED_PAUSE_EXITING | \ +- CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) +- +-#define EVMCS1_SUPPORTED_2NDEXEC \ +- (SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | \ +- SECONDARY_EXEC_WBINVD_EXITING | \ +- SECONDARY_EXEC_ENABLE_VPID | \ +- SECONDARY_EXEC_ENABLE_EPT | \ +- SECONDARY_EXEC_UNRESTRICTED_GUEST | \ +- SECONDARY_EXEC_DESC | \ +- SECONDARY_EXEC_ENABLE_RDTSCP | \ +- SECONDARY_EXEC_ENABLE_INVPCID | \ +- SECONDARY_EXEC_ENABLE_XSAVES | \ +- SECONDARY_EXEC_RDSEED_EXITING | \ +- SECONDARY_EXEC_RDRAND_EXITING | \ +- SECONDARY_EXEC_TSC_SCALING | \ +- SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | \ +- SECONDARY_EXEC_PT_USE_GPA | \ +- SECONDARY_EXEC_PT_CONCEAL_VMX | \ +- SECONDARY_EXEC_BUS_LOCK_DETECTION | \ +- SECONDARY_EXEC_NOTIFY_VM_EXITING | \ +- SECONDARY_EXEC_ENCLS_EXITING) +- +-#define EVMCS1_SUPPORTED_3RDEXEC (0ULL) +- +-#define EVMCS1_SUPPORTED_VMEXIT_CTRL \ +- (VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR | \ +- VM_EXIT_SAVE_DEBUG_CONTROLS | \ +- VM_EXIT_ACK_INTR_ON_EXIT | \ +- VM_EXIT_HOST_ADDR_SPACE_SIZE | \ +- VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | \ +- VM_EXIT_SAVE_IA32_PAT | \ +- VM_EXIT_LOAD_IA32_PAT | \ +- VM_EXIT_SAVE_IA32_EFER | \ +- VM_EXIT_LOAD_IA32_EFER | \ +- VM_EXIT_CLEAR_BNDCFGS | \ +- VM_EXIT_PT_CONCEAL_PIP | \ +- VM_EXIT_CLEAR_IA32_RTIT_CTL) +- +-#define EVMCS1_SUPPORTED_VMENTRY_CTRL \ +- (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | \ +- VM_ENTRY_LOAD_DEBUG_CONTROLS | \ +- VM_ENTRY_IA32E_MODE | \ +- VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | \ +- VM_ENTRY_LOAD_IA32_PAT | \ +- VM_ENTRY_LOAD_IA32_EFER | \ +- VM_ENTRY_LOAD_BNDCFGS | \ +- VM_ENTRY_PT_CONCEAL_PIP | \ +- VM_ENTRY_LOAD_IA32_RTIT_CTL) +- +-#define EVMCS1_SUPPORTED_VMFUNC (0) +- + #define EVMCS1_OFFSET(x) offsetof(struct hv_enlightened_vmcs, x) + #define EVMCS1_FIELD(number, name, clean_field)[ROL16(number, 6)] = \ + {EVMCS1_OFFSET(name), clean_field} +@@ -608,40 +503,6 @@ int nested_evmcs_check_controls(struct vmcs12 *vmcs12) + return 0; + } + +-#if IS_ENABLED(CONFIG_HYPERV) +-DEFINE_STATIC_KEY_FALSE(__kvm_is_using_evmcs); +- +-/* +- * KVM on Hyper-V always uses the latest known eVMCSv1 revision, the assumption +- * is: in case a feature has corresponding fields in eVMCS described and it was +- * exposed in VMX feature MSRs, KVM is free to use it. Warn if KVM meets a +- * feature which has no corresponding eVMCS field, this likely means that KVM +- * needs to be updated. 
+- */ +-#define evmcs_check_vmcs_conf(field, ctrl) \ +- do { \ +- typeof(vmcs_conf->field) unsupported; \ +- \ +- unsupported = vmcs_conf->field & ~EVMCS1_SUPPORTED_ ## ctrl; \ +- if (unsupported) { \ +- pr_warn_once(#field " unsupported with eVMCS: 0x%llx\n",\ +- (u64)unsupported); \ +- vmcs_conf->field &= EVMCS1_SUPPORTED_ ## ctrl; \ +- } \ +- } \ +- while (0) +- +-void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf) +-{ +- evmcs_check_vmcs_conf(cpu_based_exec_ctrl, EXEC_CTRL); +- evmcs_check_vmcs_conf(pin_based_exec_ctrl, PINCTRL); +- evmcs_check_vmcs_conf(cpu_based_2nd_exec_ctrl, 2NDEXEC); +- evmcs_check_vmcs_conf(cpu_based_3rd_exec_ctrl, 3RDEXEC); +- evmcs_check_vmcs_conf(vmentry_ctrl, VMENTRY_CTRL); +- evmcs_check_vmcs_conf(vmexit_ctrl, VMEXIT_CTRL); +-} +-#endif +- + int nested_enable_evmcs(struct kvm_vcpu *vcpu, + uint16_t *vmcs_version) + { +diff --git a/arch/x86/kvm/vmx/hyperv.h b/arch/x86/kvm/vmx/hyperv.h +index 9623fe1651c48..9401dbfaea7ce 100644 +--- a/arch/x86/kvm/vmx/hyperv.h ++++ b/arch/x86/kvm/vmx/hyperv.h +@@ -14,12 +14,113 @@ + #include "vmcs.h" + #include "vmcs12.h" + +-struct vmcs_config; +- +-#define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs)) +- + #define KVM_EVMCS_VERSION 1 + ++/* ++ * Enlightened VMCSv1 doesn't support these: ++ * ++ * POSTED_INTR_NV = 0x00000002, ++ * GUEST_INTR_STATUS = 0x00000810, ++ * APIC_ACCESS_ADDR = 0x00002014, ++ * POSTED_INTR_DESC_ADDR = 0x00002016, ++ * EOI_EXIT_BITMAP0 = 0x0000201c, ++ * EOI_EXIT_BITMAP1 = 0x0000201e, ++ * EOI_EXIT_BITMAP2 = 0x00002020, ++ * EOI_EXIT_BITMAP3 = 0x00002022, ++ * GUEST_PML_INDEX = 0x00000812, ++ * PML_ADDRESS = 0x0000200e, ++ * VM_FUNCTION_CONTROL = 0x00002018, ++ * EPTP_LIST_ADDRESS = 0x00002024, ++ * VMREAD_BITMAP = 0x00002026, ++ * VMWRITE_BITMAP = 0x00002028, ++ * ++ * TSC_MULTIPLIER = 0x00002032, ++ * PLE_GAP = 0x00004020, ++ * PLE_WINDOW = 0x00004022, ++ * VMX_PREEMPTION_TIMER_VALUE = 0x0000482E, ++ * ++ * Currently unsupported in KVM: ++ * GUEST_IA32_RTIT_CTL = 0x00002814, ++ */ ++#define EVMCS1_SUPPORTED_PINCTRL \ ++ (PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | \ ++ PIN_BASED_EXT_INTR_MASK | \ ++ PIN_BASED_NMI_EXITING | \ ++ PIN_BASED_VIRTUAL_NMIS) ++ ++#define EVMCS1_SUPPORTED_EXEC_CTRL \ ++ (CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR | \ ++ CPU_BASED_HLT_EXITING | \ ++ CPU_BASED_CR3_LOAD_EXITING | \ ++ CPU_BASED_CR3_STORE_EXITING | \ ++ CPU_BASED_UNCOND_IO_EXITING | \ ++ CPU_BASED_MOV_DR_EXITING | \ ++ CPU_BASED_USE_TSC_OFFSETTING | \ ++ CPU_BASED_MWAIT_EXITING | \ ++ CPU_BASED_MONITOR_EXITING | \ ++ CPU_BASED_INVLPG_EXITING | \ ++ CPU_BASED_RDPMC_EXITING | \ ++ CPU_BASED_INTR_WINDOW_EXITING | \ ++ CPU_BASED_CR8_LOAD_EXITING | \ ++ CPU_BASED_CR8_STORE_EXITING | \ ++ CPU_BASED_RDTSC_EXITING | \ ++ CPU_BASED_TPR_SHADOW | \ ++ CPU_BASED_USE_IO_BITMAPS | \ ++ CPU_BASED_MONITOR_TRAP_FLAG | \ ++ CPU_BASED_USE_MSR_BITMAPS | \ ++ CPU_BASED_NMI_WINDOW_EXITING | \ ++ CPU_BASED_PAUSE_EXITING | \ ++ CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) ++ ++#define EVMCS1_SUPPORTED_2NDEXEC \ ++ (SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | \ ++ SECONDARY_EXEC_WBINVD_EXITING | \ ++ SECONDARY_EXEC_ENABLE_VPID | \ ++ SECONDARY_EXEC_ENABLE_EPT | \ ++ SECONDARY_EXEC_UNRESTRICTED_GUEST | \ ++ SECONDARY_EXEC_DESC | \ ++ SECONDARY_EXEC_ENABLE_RDTSCP | \ ++ SECONDARY_EXEC_ENABLE_INVPCID | \ ++ SECONDARY_EXEC_ENABLE_XSAVES | \ ++ SECONDARY_EXEC_RDSEED_EXITING | \ ++ SECONDARY_EXEC_RDRAND_EXITING | \ ++ SECONDARY_EXEC_TSC_SCALING | \ ++ SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | \ ++ SECONDARY_EXEC_PT_USE_GPA | \ ++ 
SECONDARY_EXEC_PT_CONCEAL_VMX | \ ++ SECONDARY_EXEC_BUS_LOCK_DETECTION | \ ++ SECONDARY_EXEC_NOTIFY_VM_EXITING | \ ++ SECONDARY_EXEC_ENCLS_EXITING) ++ ++#define EVMCS1_SUPPORTED_3RDEXEC (0ULL) ++ ++#define EVMCS1_SUPPORTED_VMEXIT_CTRL \ ++ (VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR | \ ++ VM_EXIT_SAVE_DEBUG_CONTROLS | \ ++ VM_EXIT_ACK_INTR_ON_EXIT | \ ++ VM_EXIT_HOST_ADDR_SPACE_SIZE | \ ++ VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | \ ++ VM_EXIT_SAVE_IA32_PAT | \ ++ VM_EXIT_LOAD_IA32_PAT | \ ++ VM_EXIT_SAVE_IA32_EFER | \ ++ VM_EXIT_LOAD_IA32_EFER | \ ++ VM_EXIT_CLEAR_BNDCFGS | \ ++ VM_EXIT_PT_CONCEAL_PIP | \ ++ VM_EXIT_CLEAR_IA32_RTIT_CTL) ++ ++#define EVMCS1_SUPPORTED_VMENTRY_CTRL \ ++ (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | \ ++ VM_ENTRY_LOAD_DEBUG_CONTROLS | \ ++ VM_ENTRY_IA32E_MODE | \ ++ VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | \ ++ VM_ENTRY_LOAD_IA32_PAT | \ ++ VM_ENTRY_LOAD_IA32_EFER | \ ++ VM_ENTRY_LOAD_BNDCFGS | \ ++ VM_ENTRY_PT_CONCEAL_PIP | \ ++ VM_ENTRY_LOAD_IA32_RTIT_CTL) ++ ++#define EVMCS1_SUPPORTED_VMFUNC (0) ++ + struct evmcs_field { + u16 offset; + u16 clean_field; +@@ -65,114 +166,6 @@ static inline u64 evmcs_read_any(struct hv_enlightened_vmcs *evmcs, + return vmcs12_read_any((void *)evmcs, field, offset); + } + +-#if IS_ENABLED(CONFIG_HYPERV) +- +-DECLARE_STATIC_KEY_FALSE(__kvm_is_using_evmcs); +- +-static __always_inline bool kvm_is_using_evmcs(void) +-{ +- return static_branch_unlikely(&__kvm_is_using_evmcs); +-} +- +-static __always_inline int get_evmcs_offset(unsigned long field, +- u16 *clean_field) +-{ +- int offset = evmcs_field_offset(field, clean_field); +- +- WARN_ONCE(offset < 0, "accessing unsupported EVMCS field %lx\n", field); +- return offset; +-} +- +-static __always_inline void evmcs_write64(unsigned long field, u64 value) +-{ +- u16 clean_field; +- int offset = get_evmcs_offset(field, &clean_field); +- +- if (offset < 0) +- return; +- +- *(u64 *)((char *)current_evmcs + offset) = value; +- +- current_evmcs->hv_clean_fields &= ~clean_field; +-} +- +-static __always_inline void evmcs_write32(unsigned long field, u32 value) +-{ +- u16 clean_field; +- int offset = get_evmcs_offset(field, &clean_field); +- +- if (offset < 0) +- return; +- +- *(u32 *)((char *)current_evmcs + offset) = value; +- current_evmcs->hv_clean_fields &= ~clean_field; +-} +- +-static __always_inline void evmcs_write16(unsigned long field, u16 value) +-{ +- u16 clean_field; +- int offset = get_evmcs_offset(field, &clean_field); +- +- if (offset < 0) +- return; +- +- *(u16 *)((char *)current_evmcs + offset) = value; +- current_evmcs->hv_clean_fields &= ~clean_field; +-} +- +-static __always_inline u64 evmcs_read64(unsigned long field) +-{ +- int offset = get_evmcs_offset(field, NULL); +- +- if (offset < 0) +- return 0; +- +- return *(u64 *)((char *)current_evmcs + offset); +-} +- +-static __always_inline u32 evmcs_read32(unsigned long field) +-{ +- int offset = get_evmcs_offset(field, NULL); +- +- if (offset < 0) +- return 0; +- +- return *(u32 *)((char *)current_evmcs + offset); +-} +- +-static __always_inline u16 evmcs_read16(unsigned long field) +-{ +- int offset = get_evmcs_offset(field, NULL); +- +- if (offset < 0) +- return 0; +- +- return *(u16 *)((char *)current_evmcs + offset); +-} +- +-static inline void evmcs_load(u64 phys_addr) +-{ +- struct hv_vp_assist_page *vp_ap = +- hv_get_vp_assist_page(smp_processor_id()); +- +- if (current_evmcs->hv_enlightenments_control.nested_flush_hypercall) +- vp_ap->nested_control.features.directhypercall = 1; +- vp_ap->current_nested_vmcs = phys_addr; +- 
vp_ap->enlighten_vmentry = 1; +-} +- +-void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf); +-#else /* !IS_ENABLED(CONFIG_HYPERV) */ +-static __always_inline bool kvm_is_using_evmcs(void) { return false; } +-static __always_inline void evmcs_write64(unsigned long field, u64 value) {} +-static __always_inline void evmcs_write32(unsigned long field, u32 value) {} +-static __always_inline void evmcs_write16(unsigned long field, u16 value) {} +-static __always_inline u64 evmcs_read64(unsigned long field) { return 0; } +-static __always_inline u32 evmcs_read32(unsigned long field) { return 0; } +-static __always_inline u16 evmcs_read16(unsigned long field) { return 0; } +-static inline void evmcs_load(u64 phys_addr) {} +-#endif /* IS_ENABLED(CONFIG_HYPERV) */ +- + #define EVMPTR_INVALID (-1ULL) + #define EVMPTR_MAP_PENDING (-2ULL) + +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c +index d1b4a85def0a6..0ad66b9207e85 100644 +--- a/arch/x86/kvm/vmx/nested.c ++++ b/arch/x86/kvm/vmx/nested.c +@@ -12,6 +12,7 @@ + #include "mmu.h" + #include "nested.h" + #include "pmu.h" ++#include "posted_intr.h" + #include "sgx.h" + #include "trace.h" + #include "vmx.h" +@@ -3830,8 +3831,8 @@ static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) + if (!pi_test_and_clear_on(vmx->nested.pi_desc)) + return 0; + +- max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256); +- if (max_irr != 256) { ++ max_irr = pi_find_highest_vector(vmx->nested.pi_desc); ++ if (max_irr > 0) { + vapic_page = vmx->nested.virtual_apic_map.hva; + if (!vapic_page) + goto mmio_needed; +@@ -3964,8 +3965,40 @@ static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu) + + static bool vmx_has_nested_events(struct kvm_vcpu *vcpu, bool for_injection) + { +- return nested_vmx_preemption_timer_pending(vcpu) || +- to_vmx(vcpu)->nested.mtf_pending; ++ struct vcpu_vmx *vmx = to_vmx(vcpu); ++ void *vapic = vmx->nested.virtual_apic_map.hva; ++ int max_irr, vppr; ++ ++ if (nested_vmx_preemption_timer_pending(vcpu) || ++ vmx->nested.mtf_pending) ++ return true; ++ ++ /* ++ * Virtual Interrupt Delivery doesn't require manual injection. Either ++ * the interrupt is already in GUEST_RVI and will be recognized by CPU ++ * at VM-Entry, or there is a KVM_REQ_EVENT pending and KVM will move ++ * the interrupt from the PIR to RVI prior to entering the guest. 
++ */ ++ if (for_injection) ++ return false; ++ ++ if (!nested_cpu_has_vid(get_vmcs12(vcpu)) || ++ __vmx_interrupt_blocked(vcpu)) ++ return false; ++ ++ if (!vapic) ++ return false; ++ ++ vppr = *((u32 *)(vapic + APIC_PROCPRI)); ++ ++ if (vmx->nested.pi_pending && vmx->nested.pi_desc && ++ pi_test_on(vmx->nested.pi_desc)) { ++ max_irr = pi_find_highest_vector(vmx->nested.pi_desc); ++ if (max_irr > 0 && (max_irr & 0xf0) > (vppr & 0xf0)) ++ return true; ++ } ++ ++ return false; + } + + /* +diff --git a/arch/x86/kvm/vmx/posted_intr.h b/arch/x86/kvm/vmx/posted_intr.h +index 26992076552ef..1715d2ab07be5 100644 +--- a/arch/x86/kvm/vmx/posted_intr.h ++++ b/arch/x86/kvm/vmx/posted_intr.h +@@ -2,97 +2,8 @@ + #ifndef __KVM_X86_VMX_POSTED_INTR_H + #define __KVM_X86_VMX_POSTED_INTR_H + +-#define POSTED_INTR_ON 0 +-#define POSTED_INTR_SN 1 +- +-#define PID_TABLE_ENTRY_VALID 1 +- +-/* Posted-Interrupt Descriptor */ +-struct pi_desc { +- u32 pir[8]; /* Posted interrupt requested */ +- union { +- struct { +- /* bit 256 - Outstanding Notification */ +- u16 on : 1, +- /* bit 257 - Suppress Notification */ +- sn : 1, +- /* bit 271:258 - Reserved */ +- rsvd_1 : 14; +- /* bit 279:272 - Notification Vector */ +- u8 nv; +- /* bit 287:280 - Reserved */ +- u8 rsvd_2; +- /* bit 319:288 - Notification Destination */ +- u32 ndst; +- }; +- u64 control; +- }; +- u32 rsvd[6]; +-} __aligned(64); +- +-static inline bool pi_test_and_set_on(struct pi_desc *pi_desc) +-{ +- return test_and_set_bit(POSTED_INTR_ON, +- (unsigned long *)&pi_desc->control); +-} +- +-static inline bool pi_test_and_clear_on(struct pi_desc *pi_desc) +-{ +- return test_and_clear_bit(POSTED_INTR_ON, +- (unsigned long *)&pi_desc->control); +-} +- +-static inline bool pi_test_and_clear_sn(struct pi_desc *pi_desc) +-{ +- return test_and_clear_bit(POSTED_INTR_SN, +- (unsigned long *)&pi_desc->control); +-} +- +-static inline bool pi_test_and_set_pir(int vector, struct pi_desc *pi_desc) +-{ +- return test_and_set_bit(vector, (unsigned long *)pi_desc->pir); +-} +- +-static inline bool pi_is_pir_empty(struct pi_desc *pi_desc) +-{ +- return bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS); +-} +- +-static inline void pi_set_sn(struct pi_desc *pi_desc) +-{ +- set_bit(POSTED_INTR_SN, +- (unsigned long *)&pi_desc->control); +-} +- +-static inline void pi_set_on(struct pi_desc *pi_desc) +-{ +- set_bit(POSTED_INTR_ON, +- (unsigned long *)&pi_desc->control); +-} +- +-static inline void pi_clear_on(struct pi_desc *pi_desc) +-{ +- clear_bit(POSTED_INTR_ON, +- (unsigned long *)&pi_desc->control); +-} +- +-static inline void pi_clear_sn(struct pi_desc *pi_desc) +-{ +- clear_bit(POSTED_INTR_SN, +- (unsigned long *)&pi_desc->control); +-} +- +-static inline bool pi_test_on(struct pi_desc *pi_desc) +-{ +- return test_bit(POSTED_INTR_ON, +- (unsigned long *)&pi_desc->control); +-} +- +-static inline bool pi_test_sn(struct pi_desc *pi_desc) +-{ +- return test_bit(POSTED_INTR_SN, +- (unsigned long *)&pi_desc->control); +-} ++#include ++#include + + void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu); + void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu); +@@ -103,4 +14,12 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq, + uint32_t guest_irq, bool set); + void vmx_pi_start_assignment(struct kvm *kvm); + ++static inline int pi_find_highest_vector(struct pi_desc *pi_desc) ++{ ++ int vec; ++ ++ vec = find_last_bit((unsigned long *)pi_desc->pir, 256); ++ return vec < 256 ? 
vec : -1; ++} ++ + #endif /* __KVM_X86_VMX_POSTED_INTR_H */ +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c +index f5f652a546bf2..2e0106d9d371c 100644 +--- a/arch/x86/kvm/vmx/vmx.c ++++ b/arch/x86/kvm/vmx/vmx.c +@@ -66,6 +66,8 @@ + #include "vmx.h" + #include "x86.h" + #include "smm.h" ++#include "vmx_onhyperv.h" ++#include "posted_intr.h" + + MODULE_AUTHOR("Qumranet"); + MODULE_LICENSE("GPL"); +diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h +index 912b0c4697429..6be1627d888e5 100644 +--- a/arch/x86/kvm/vmx/vmx.h ++++ b/arch/x86/kvm/vmx/vmx.h +@@ -7,10 +7,10 @@ + #include + #include + #include ++#include + + #include "capabilities.h" + #include "../kvm_cache_regs.h" +-#include "posted_intr.h" + #include "vmcs.h" + #include "vmx_ops.h" + #include "../cpuid.h" +diff --git a/arch/x86/kvm/vmx/vmx_onhyperv.c b/arch/x86/kvm/vmx/vmx_onhyperv.c +new file mode 100644 +index 0000000000000..b9a8b91166d02 +--- /dev/null ++++ b/arch/x86/kvm/vmx/vmx_onhyperv.c +@@ -0,0 +1,36 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++ ++#include "capabilities.h" ++#include "vmx_onhyperv.h" ++ ++DEFINE_STATIC_KEY_FALSE(__kvm_is_using_evmcs); ++ ++/* ++ * KVM on Hyper-V always uses the latest known eVMCSv1 revision, the assumption ++ * is: in case a feature has corresponding fields in eVMCS described and it was ++ * exposed in VMX feature MSRs, KVM is free to use it. Warn if KVM meets a ++ * feature which has no corresponding eVMCS field, this likely means that KVM ++ * needs to be updated. ++ */ ++#define evmcs_check_vmcs_conf(field, ctrl) \ ++ do { \ ++ typeof(vmcs_conf->field) unsupported; \ ++ \ ++ unsupported = vmcs_conf->field & ~EVMCS1_SUPPORTED_ ## ctrl; \ ++ if (unsupported) { \ ++ pr_warn_once(#field " unsupported with eVMCS: 0x%llx\n",\ ++ (u64)unsupported); \ ++ vmcs_conf->field &= EVMCS1_SUPPORTED_ ## ctrl; \ ++ } \ ++ } \ ++ while (0) ++ ++void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf) ++{ ++ evmcs_check_vmcs_conf(cpu_based_exec_ctrl, EXEC_CTRL); ++ evmcs_check_vmcs_conf(pin_based_exec_ctrl, PINCTRL); ++ evmcs_check_vmcs_conf(cpu_based_2nd_exec_ctrl, 2NDEXEC); ++ evmcs_check_vmcs_conf(cpu_based_3rd_exec_ctrl, 3RDEXEC); ++ evmcs_check_vmcs_conf(vmentry_ctrl, VMENTRY_CTRL); ++ evmcs_check_vmcs_conf(vmexit_ctrl, VMEXIT_CTRL); ++} +diff --git a/arch/x86/kvm/vmx/vmx_onhyperv.h b/arch/x86/kvm/vmx/vmx_onhyperv.h +new file mode 100644 +index 0000000000000..11541d272dbd8 +--- /dev/null ++++ b/arch/x86/kvm/vmx/vmx_onhyperv.h +@@ -0,0 +1,124 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++ ++#ifndef __ARCH_X86_KVM_VMX_ONHYPERV_H__ ++#define __ARCH_X86_KVM_VMX_ONHYPERV_H__ ++ ++#include ++ ++#include ++ ++#include "capabilities.h" ++#include "hyperv.h" ++#include "vmcs12.h" ++ ++#define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs)) ++ ++#if IS_ENABLED(CONFIG_HYPERV) ++ ++DECLARE_STATIC_KEY_FALSE(__kvm_is_using_evmcs); ++ ++static __always_inline bool kvm_is_using_evmcs(void) ++{ ++ return static_branch_unlikely(&__kvm_is_using_evmcs); ++} ++ ++static __always_inline int get_evmcs_offset(unsigned long field, ++ u16 *clean_field) ++{ ++ int offset = evmcs_field_offset(field, clean_field); ++ ++ WARN_ONCE(offset < 0, "accessing unsupported EVMCS field %lx\n", field); ++ return offset; ++} ++ ++static __always_inline void evmcs_write64(unsigned long field, u64 value) ++{ ++ u16 clean_field; ++ int offset = get_evmcs_offset(field, &clean_field); ++ ++ if (offset < 0) ++ return; ++ ++ *(u64 *)((char *)current_evmcs + offset) = value; ++ ++ 
current_evmcs->hv_clean_fields &= ~clean_field; ++} ++ ++static __always_inline void evmcs_write32(unsigned long field, u32 value) ++{ ++ u16 clean_field; ++ int offset = get_evmcs_offset(field, &clean_field); ++ ++ if (offset < 0) ++ return; ++ ++ *(u32 *)((char *)current_evmcs + offset) = value; ++ current_evmcs->hv_clean_fields &= ~clean_field; ++} ++ ++static __always_inline void evmcs_write16(unsigned long field, u16 value) ++{ ++ u16 clean_field; ++ int offset = get_evmcs_offset(field, &clean_field); ++ ++ if (offset < 0) ++ return; ++ ++ *(u16 *)((char *)current_evmcs + offset) = value; ++ current_evmcs->hv_clean_fields &= ~clean_field; ++} ++ ++static __always_inline u64 evmcs_read64(unsigned long field) ++{ ++ int offset = get_evmcs_offset(field, NULL); ++ ++ if (offset < 0) ++ return 0; ++ ++ return *(u64 *)((char *)current_evmcs + offset); ++} ++ ++static __always_inline u32 evmcs_read32(unsigned long field) ++{ ++ int offset = get_evmcs_offset(field, NULL); ++ ++ if (offset < 0) ++ return 0; ++ ++ return *(u32 *)((char *)current_evmcs + offset); ++} ++ ++static __always_inline u16 evmcs_read16(unsigned long field) ++{ ++ int offset = get_evmcs_offset(field, NULL); ++ ++ if (offset < 0) ++ return 0; ++ ++ return *(u16 *)((char *)current_evmcs + offset); ++} ++ ++static inline void evmcs_load(u64 phys_addr) ++{ ++ struct hv_vp_assist_page *vp_ap = ++ hv_get_vp_assist_page(smp_processor_id()); ++ ++ if (current_evmcs->hv_enlightenments_control.nested_flush_hypercall) ++ vp_ap->nested_control.features.directhypercall = 1; ++ vp_ap->current_nested_vmcs = phys_addr; ++ vp_ap->enlighten_vmentry = 1; ++} ++ ++void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf); ++#else /* !IS_ENABLED(CONFIG_HYPERV) */ ++static __always_inline bool kvm_is_using_evmcs(void) { return false; } ++static __always_inline void evmcs_write64(unsigned long field, u64 value) {} ++static __always_inline void evmcs_write32(unsigned long field, u32 value) {} ++static __always_inline void evmcs_write16(unsigned long field, u16 value) {} ++static __always_inline u64 evmcs_read64(unsigned long field) { return 0; } ++static __always_inline u32 evmcs_read32(unsigned long field) { return 0; } ++static __always_inline u16 evmcs_read16(unsigned long field) { return 0; } ++static inline void evmcs_load(u64 phys_addr) {} ++#endif /* IS_ENABLED(CONFIG_HYPERV) */ ++ ++#endif /* __ARCH_X86_KVM_VMX_ONHYPERV_H__ */ +diff --git a/arch/x86/kvm/vmx/vmx_ops.h b/arch/x86/kvm/vmx/vmx_ops.h +index 6a0c6e81f7f3e..8060e5fc6dbd8 100644 +--- a/arch/x86/kvm/vmx/vmx_ops.h ++++ b/arch/x86/kvm/vmx/vmx_ops.h +@@ -6,7 +6,7 @@ + + #include + +-#include "hyperv.h" ++#include "vmx_onhyperv.h" + #include "vmcs.h" + #include "../x86.h" + +diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c +index 3da3c266a66f3..a936219aebb81 100644 +--- a/drivers/bluetooth/btintel.c ++++ b/drivers/bluetooth/btintel.c +@@ -2845,6 +2845,9 @@ static int btintel_setup_combined(struct hci_dev *hdev) + btintel_set_dsm_reset_method(hdev, &ver_tlv); + + err = btintel_bootloader_setup_tlv(hdev, &ver_tlv); ++ if (err) ++ goto exit_error; ++ + btintel_register_devcoredump_support(hdev); + break; + default: +diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c +index 84d7033e5efe8..ef51dfb39baa9 100644 +--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c ++++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c +@@ -40,10 +40,14 @@ struct qcom_cpufreq_match_data { + const char **genpd_names; + }; + ++struct qcom_cpufreq_drv_cpu { ++ int 
opp_token; ++}; ++ + struct qcom_cpufreq_drv { +- int *opp_tokens; + u32 versions; + const struct qcom_cpufreq_match_data *data; ++ struct qcom_cpufreq_drv_cpu cpus[]; + }; + + static struct platform_device *cpufreq_dt_pdev, *cpufreq_pdev; +@@ -243,42 +247,39 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) + return -ENOENT; + } + +- drv = kzalloc(sizeof(*drv), GFP_KERNEL); +- if (!drv) ++ drv = devm_kzalloc(&pdev->dev, struct_size(drv, cpus, num_possible_cpus()), ++ GFP_KERNEL); ++ if (!drv) { ++ of_node_put(np); + return -ENOMEM; ++ } + + match = pdev->dev.platform_data; + drv->data = match->data; + if (!drv->data) { +- ret = -ENODEV; +- goto free_drv; ++ of_node_put(np); ++ return -ENODEV; + } + + if (drv->data->get_version) { + speedbin_nvmem = of_nvmem_cell_get(np, NULL); + if (IS_ERR(speedbin_nvmem)) { +- ret = dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem), +- "Could not get nvmem cell\n"); +- goto free_drv; ++ of_node_put(np); ++ return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem), ++ "Could not get nvmem cell\n"); + } + + ret = drv->data->get_version(cpu_dev, + speedbin_nvmem, &pvs_name, drv); + if (ret) { ++ of_node_put(np); + nvmem_cell_put(speedbin_nvmem); +- goto free_drv; ++ return ret; + } + nvmem_cell_put(speedbin_nvmem); + } + of_node_put(np); + +- drv->opp_tokens = kcalloc(num_possible_cpus(), sizeof(*drv->opp_tokens), +- GFP_KERNEL); +- if (!drv->opp_tokens) { +- ret = -ENOMEM; +- goto free_drv; +- } +- + for_each_possible_cpu(cpu) { + struct dev_pm_opp_config config = { + .supported_hw = NULL, +@@ -304,9 +305,9 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) + } + + if (config.supported_hw || config.genpd_names) { +- drv->opp_tokens[cpu] = dev_pm_opp_set_config(cpu_dev, &config); +- if (drv->opp_tokens[cpu] < 0) { +- ret = drv->opp_tokens[cpu]; ++ drv->cpus[cpu].opp_token = dev_pm_opp_set_config(cpu_dev, &config); ++ if (drv->cpus[cpu].opp_token < 0) { ++ ret = drv->cpus[cpu].opp_token; + dev_err(cpu_dev, "Failed to set OPP config\n"); + goto free_opp; + } +@@ -325,11 +326,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) + + free_opp: + for_each_possible_cpu(cpu) +- dev_pm_opp_clear_config(drv->opp_tokens[cpu]); +- kfree(drv->opp_tokens); +-free_drv: +- kfree(drv); +- ++ dev_pm_opp_clear_config(drv->cpus[cpu].opp_token); + return ret; + } + +@@ -341,10 +338,7 @@ static void qcom_cpufreq_remove(struct platform_device *pdev) + platform_device_unregister(cpufreq_dt_pdev); + + for_each_possible_cpu(cpu) +- dev_pm_opp_clear_config(drv->opp_tokens[cpu]); +- +- kfree(drv->opp_tokens); +- kfree(drv); ++ dev_pm_opp_clear_config(drv->cpus[cpu].opp_token); + } + + static struct platform_driver qcom_cpufreq_driver = { +diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c +index 793f1a7ad5e34..53fdfd32a7e77 100644 +--- a/drivers/dma/fsl-edma-common.c ++++ b/drivers/dma/fsl-edma-common.c +@@ -3,6 +3,7 @@ + // Copyright (c) 2013-2014 Freescale Semiconductor, Inc + // Copyright (c) 2017 Sysam, Angelo Dureghello + ++#include + #include + #include + #include +@@ -74,18 +75,10 @@ static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan) + + flags = fsl_edma_drvflags(fsl_chan); + val = edma_readl_chreg(fsl_chan, ch_sbr); +- /* Remote/local swapped wrongly on iMX8 QM Audio edma */ +- if (flags & FSL_EDMA_DRV_QUIRK_SWAPPED) { +- if (!fsl_chan->is_rxchan) +- val |= EDMA_V3_CH_SBR_RD; +- else +- val |= EDMA_V3_CH_SBR_WR; +- } else { +- if (fsl_chan->is_rxchan) +- val |= EDMA_V3_CH_SBR_RD; +- else +- val |= 
EDMA_V3_CH_SBR_WR; +- } ++ if (fsl_chan->is_rxchan) ++ val |= EDMA_V3_CH_SBR_RD; ++ else ++ val |= EDMA_V3_CH_SBR_WR; + + if (fsl_chan->is_remote) + val &= ~(EDMA_V3_CH_SBR_RD | EDMA_V3_CH_SBR_WR); +@@ -97,8 +90,8 @@ static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan) + * ch_mux: With the exception of 0, attempts to write a value + * already in use will be forced to 0. + */ +- if (!edma_readl_chreg(fsl_chan, ch_mux)) +- edma_writel_chreg(fsl_chan, fsl_chan->srcid, ch_mux); ++ if (!edma_readl(fsl_chan->edma, fsl_chan->mux_addr)) ++ edma_writel(fsl_chan->edma, fsl_chan->srcid, fsl_chan->mux_addr); + } + + val = edma_readl_chreg(fsl_chan, ch_csr); +@@ -134,7 +127,7 @@ static void fsl_edma3_disable_request(struct fsl_edma_chan *fsl_chan) + flags = fsl_edma_drvflags(fsl_chan); + + if (flags & FSL_EDMA_DRV_HAS_CHMUX) +- edma_writel_chreg(fsl_chan, 0, ch_mux); ++ edma_writel(fsl_chan->edma, 0, fsl_chan->mux_addr); + + val &= ~EDMA_V3_CH_CSR_ERQ; + edma_writel_chreg(fsl_chan, val, ch_csr); +@@ -754,6 +747,8 @@ struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan, + fsl_desc->iscyclic = false; + + fsl_chan->is_sw = true; ++ if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_MEM_REMOTE) ++ fsl_chan->is_remote = true; + + /* To match with copy_align and max_seg_size so 1 tcd is enough */ + fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[0].vtcd, dma_src, dma_dst, +@@ -802,6 +797,9 @@ int fsl_edma_alloc_chan_resources(struct dma_chan *chan) + { + struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); + ++ if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK) ++ clk_prepare_enable(fsl_chan->clk); ++ + fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev, + sizeof(struct fsl_edma_hw_tcd), + 32, 0); +@@ -829,6 +827,9 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan) + fsl_chan->tcd_pool = NULL; + fsl_chan->is_sw = false; + fsl_chan->srcid = 0; ++ fsl_chan->is_remote = false; ++ if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK) ++ clk_disable_unprepare(fsl_chan->clk); + } + + void fsl_edma_cleanup_vchan(struct dma_device *dmadev) +diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h +index 92fe53faa53b1..6028389de408b 100644 +--- a/drivers/dma/fsl-edma-common.h ++++ b/drivers/dma/fsl-edma-common.h +@@ -146,6 +146,7 @@ struct fsl_edma_chan { + enum dma_data_direction dma_dir; + char chan_name[32]; + struct fsl_edma_hw_tcd __iomem *tcd; ++ void __iomem *mux_addr; + u32 real_count; + struct work_struct issue_worker; + struct platform_device *pdev; +@@ -177,8 +178,7 @@ struct fsl_edma_desc { + #define FSL_EDMA_DRV_HAS_PD BIT(5) + #define FSL_EDMA_DRV_HAS_CHCLK BIT(6) + #define FSL_EDMA_DRV_HAS_CHMUX BIT(7) +-/* imx8 QM audio edma remote local swapped */ +-#define FSL_EDMA_DRV_QUIRK_SWAPPED BIT(8) ++#define FSL_EDMA_DRV_MEM_REMOTE BIT(8) + /* control and status register is in tcd address space, edma3 reg layout */ + #define FSL_EDMA_DRV_SPLIT_REG BIT(9) + #define FSL_EDMA_DRV_BUS_8BYTE BIT(10) +@@ -207,6 +207,8 @@ struct fsl_edma_drvdata { + u32 chreg_off; + u32 chreg_space_sz; + u32 flags; ++ u32 mux_off; /* channel mux register offset */ ++ u32 mux_skip; /* how much skip for each channel */ + int (*setup_irq)(struct platform_device *pdev, + struct fsl_edma_engine *fsl_edma); + }; +diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c +index 42a338cbe6143..8a0ae90548997 100644 +--- a/drivers/dma/fsl-edma-main.c ++++ b/drivers/dma/fsl-edma-main.c +@@ -340,16 +340,19 @@ static struct fsl_edma_drvdata 
imx7ulp_data = { + }; + + static struct fsl_edma_drvdata imx8qm_data = { +- .flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3, ++ .flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_MEM_REMOTE, + .chreg_space_sz = 0x10000, + .chreg_off = 0x10000, + .setup_irq = fsl_edma3_irq_init, + }; + +-static struct fsl_edma_drvdata imx8qm_audio_data = { +- .flags = FSL_EDMA_DRV_QUIRK_SWAPPED | FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3, ++static struct fsl_edma_drvdata imx8ulp_data = { ++ .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_CHCLK | FSL_EDMA_DRV_HAS_DMACLK | ++ FSL_EDMA_DRV_EDMA3, + .chreg_space_sz = 0x10000, + .chreg_off = 0x10000, ++ .mux_off = 0x10000 + offsetof(struct fsl_edma3_ch_reg, ch_mux), ++ .mux_skip = 0x10000, + .setup_irq = fsl_edma3_irq_init, + }; + +@@ -364,6 +367,8 @@ static struct fsl_edma_drvdata imx93_data4 = { + .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4, + .chreg_space_sz = 0x8000, + .chreg_off = 0x10000, ++ .mux_off = 0x10000 + offsetof(struct fsl_edma3_ch_reg, ch_mux), ++ .mux_skip = 0x8000, + .setup_irq = fsl_edma3_irq_init, + }; + +@@ -372,7 +377,7 @@ static const struct of_device_id fsl_edma_dt_ids[] = { + { .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data}, + { .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data}, + { .compatible = "fsl,imx8qm-edma", .data = &imx8qm_data}, +- { .compatible = "fsl,imx8qm-adma", .data = &imx8qm_audio_data}, ++ { .compatible = "fsl,imx8ulp-edma", .data = &imx8ulp_data}, + { .compatible = "fsl,imx93-edma3", .data = &imx93_data3}, + { .compatible = "fsl,imx93-edma4", .data = &imx93_data4}, + { /* sentinel */ } +@@ -427,6 +432,7 @@ static int fsl_edma_probe(struct platform_device *pdev) + struct fsl_edma_engine *fsl_edma; + const struct fsl_edma_drvdata *drvdata = NULL; + u32 chan_mask[2] = {0, 0}; ++ char clk_name[36]; + struct edma_regs *regs; + int chans; + int ret, i; +@@ -540,12 +546,23 @@ static int fsl_edma_probe(struct platform_device *pdev) + offsetof(struct fsl_edma3_ch_reg, tcd) : 0; + fsl_chan->tcd = fsl_edma->membase + + i * drvdata->chreg_space_sz + drvdata->chreg_off + len; ++ fsl_chan->mux_addr = fsl_edma->membase + drvdata->mux_off + i * drvdata->mux_skip; + ++ if (drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK) { ++ snprintf(clk_name, sizeof(clk_name), "ch%02d", i); ++ fsl_chan->clk = devm_clk_get_enabled(&pdev->dev, ++ (const char *)clk_name); ++ ++ if (IS_ERR(fsl_chan->clk)) ++ return PTR_ERR(fsl_chan->clk); ++ } + fsl_chan->pdev = pdev; + vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev); + + edma_write_tcdreg(fsl_chan, 0, csr); + fsl_edma_chan_mux(fsl_chan, 0, false); ++ if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK) ++ clk_disable_unprepare(fsl_chan->clk); + } + + ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma); +diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig +index b59e3041fd627..f0e9f250669e2 100644 +--- a/drivers/firmware/Kconfig ++++ b/drivers/firmware/Kconfig +@@ -229,6 +229,7 @@ config QCOM_SCM_DOWNLOAD_MODE_DEFAULT + config SYSFB + bool + select BOOT_VESA_SUPPORT ++ select SCREEN_INFO + + config SYSFB_SIMPLEFB + bool "Mark VGA/VBE/EFI FB as generic system framebuffer" +diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c +index 3c197db42c9d9..defd7a36cb08a 100644 +--- a/drivers/firmware/sysfb.c ++++ b/drivers/firmware/sysfb.c +@@ -77,6 +77,8 @@ static __init int sysfb_init(void) + bool compatible; + int ret = 0; + ++ screen_info_apply_fixups(); ++ + mutex_lock(&disable_lock); + if (disabled) + goto 
unlock_mutex; +diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +index d0255ea98348d..247e7d675e2b9 100644 +--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c ++++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +@@ -1556,7 +1556,7 @@ static void skl_wrpll_params_populate(struct skl_wrpll_params *params, + } + + static int +-skl_ddi_calculate_wrpll(int clock /* in Hz */, ++skl_ddi_calculate_wrpll(int clock, + int ref_clock, + struct skl_wrpll_params *wrpll_params) + { +@@ -1581,7 +1581,7 @@ skl_ddi_calculate_wrpll(int clock /* in Hz */, + }; + unsigned int dco, d, i; + unsigned int p0, p1, p2; +- u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */ ++ u64 afe_clock = (u64)clock * 1000 * 5; /* AFE Clock is 5x Pixel clock, in Hz */ + + for (d = 0; d < ARRAY_SIZE(dividers); d++) { + for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) { +@@ -1713,7 +1713,7 @@ static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) + + ctrl1 |= DPLL_CTRL1_HDMI_MODE(0); + +- ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000, ++ ret = skl_ddi_calculate_wrpll(crtc_state->port_clock, + i915->display.dpll.ref_clks.nssc, &wrpll_params); + if (ret) + return ret; +diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_regs.h b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h +index 8023c85c7fa0e..74059384892af 100644 +--- a/drivers/gpu/drm/i915/display/intel_hdcp_regs.h ++++ b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h +@@ -249,7 +249,7 @@ + #define HDCP2_STREAM_STATUS(dev_priv, trans, port) \ + (GRAPHICS_VER(dev_priv) >= 12 ? \ + TRANS_HDCP2_STREAM_STATUS(trans) : \ +- PIPE_HDCP2_STREAM_STATUS(pipe)) ++ PIPE_HDCP2_STREAM_STATUS(port)) + + #define _PORTA_HDCP2_AUTH_STREAM 0x66F00 + #define _PORTB_HDCP2_AUTH_STREAM 0x66F04 +diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c +index 3f90403d86cb4..0808b54d3c518 100644 +--- a/drivers/gpu/drm/i915/i915_perf.c ++++ b/drivers/gpu/drm/i915/i915_perf.c +@@ -2781,26 +2781,6 @@ oa_configure_all_contexts(struct i915_perf_stream *stream, + return 0; + } + +-static int +-gen12_configure_all_contexts(struct i915_perf_stream *stream, +- const struct i915_oa_config *oa_config, +- struct i915_active *active) +-{ +- struct flex regs[] = { +- { +- GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE), +- CTX_R_PWR_CLK_STATE, +- }, +- }; +- +- if (stream->engine->class != RENDER_CLASS) +- return 0; +- +- return oa_configure_all_contexts(stream, +- regs, ARRAY_SIZE(regs), +- active); +-} +- + static int + lrc_configure_all_contexts(struct i915_perf_stream *stream, + const struct i915_oa_config *oa_config, +@@ -2907,7 +2887,6 @@ gen12_enable_metric_set(struct i915_perf_stream *stream, + { + struct drm_i915_private *i915 = stream->perf->i915; + struct intel_uncore *uncore = stream->uncore; +- struct i915_oa_config *oa_config = stream->oa_config; + bool periodic = stream->periodic; + u32 period_exponent = stream->period_exponent; + u32 sqcnt1; +@@ -2951,15 +2930,6 @@ gen12_enable_metric_set(struct i915_perf_stream *stream, + + intel_uncore_rmw(uncore, GEN12_SQCNT1, 0, sqcnt1); + +- /* +- * Update all contexts prior writing the mux configurations as we need +- * to make sure all slices/subslices are ON before writing to NOA +- * registers. +- */ +- ret = gen12_configure_all_contexts(stream, oa_config, active); +- if (ret) +- return ret; +- + /* + * For Gen12, performance counters are context + * saved/restored. 
Only enable it for the context that +@@ -3014,9 +2984,6 @@ static void gen12_disable_metric_set(struct i915_perf_stream *stream) + _MASKED_BIT_DISABLE(GEN12_DISABLE_DOP_GATING)); + } + +- /* Reset all contexts' slices/subslices configurations. */ +- gen12_configure_all_contexts(stream, NULL, NULL); +- + /* disable the context save/restore or OAR counters */ + if (stream->ctx) + gen12_configure_oar_context(stream, NULL); +diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c +index 1b2ff0c40fc1c..6c599a9f49ee4 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_prime.c ++++ b/drivers/gpu/drm/nouveau/nouveau_prime.c +@@ -64,7 +64,8 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev, + * to the caller, instead of a normal nouveau_bo ttm reference. */ + ret = drm_gem_object_init(dev, &nvbo->bo.base, size); + if (ret) { +- nouveau_bo_ref(NULL, &nvbo); ++ drm_gem_object_release(&nvbo->bo.base); ++ kfree(nvbo); + obj = ERR_PTR(-ENOMEM); + goto unlock; + } +diff --git a/drivers/gpu/drm/virtio/virtgpu_submit.c b/drivers/gpu/drm/virtio/virtgpu_submit.c +index 5c514946bbad9..d530c058f53e2 100644 +--- a/drivers/gpu/drm/virtio/virtgpu_submit.c ++++ b/drivers/gpu/drm/virtio/virtgpu_submit.c +@@ -48,7 +48,7 @@ struct virtio_gpu_submit { + static int virtio_gpu_do_fence_wait(struct virtio_gpu_submit *submit, + struct dma_fence *in_fence) + { +- u32 context = submit->fence_ctx + submit->ring_idx; ++ u64 context = submit->fence_ctx + submit->ring_idx; + + if (dma_fence_match_context(in_fence, context)) + return 0; +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +index 5efc6a766f64e..588d50ababf60 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +@@ -32,7 +32,6 @@ + #define VMW_FENCE_WRAP (1 << 31) + + struct vmw_fence_manager { +- int num_fence_objects; + struct vmw_private *dev_priv; + spinlock_t lock; + struct list_head fence_list; +@@ -124,13 +123,13 @@ static void vmw_fence_obj_destroy(struct dma_fence *f) + { + struct vmw_fence_obj *fence = + container_of(f, struct vmw_fence_obj, base); +- + struct vmw_fence_manager *fman = fman_from_fence(fence); + +- spin_lock(&fman->lock); +- list_del_init(&fence->head); +- --fman->num_fence_objects; +- spin_unlock(&fman->lock); ++ if (!list_empty(&fence->head)) { ++ spin_lock(&fman->lock); ++ list_del_init(&fence->head); ++ spin_unlock(&fman->lock); ++ } + fence->destroy(fence); + } + +@@ -257,7 +256,6 @@ static const struct dma_fence_ops vmw_fence_ops = { + .release = vmw_fence_obj_destroy, + }; + +- + /* + * Execute signal actions on fences recently signaled. 
+ * This is done from a workqueue so we don't have to execute +@@ -355,7 +353,6 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman, + goto out_unlock; + } + list_add_tail(&fence->head, &fman->fence_list); +- ++fman->num_fence_objects; + + out_unlock: + spin_unlock(&fman->lock); +@@ -403,7 +400,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman, + u32 passed_seqno) + { + u32 goal_seqno; +- struct vmw_fence_obj *fence; ++ struct vmw_fence_obj *fence, *next_fence; + + if (likely(!fman->seqno_valid)) + return false; +@@ -413,7 +410,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman, + return false; + + fman->seqno_valid = false; +- list_for_each_entry(fence, &fman->fence_list, head) { ++ list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) { + if (!list_empty(&fence->seq_passed_actions)) { + fman->seqno_valid = true; + vmw_fence_goal_write(fman->dev_priv, +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +index c45b4724e4141..e20f64b67b266 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +@@ -92,7 +92,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv, + { + struct vmw_escape_video_flush *flush; + size_t fifo_size; +- bool have_so = (dev_priv->active_display_unit == vmw_du_screen_object); ++ bool have_so = (dev_priv->active_display_unit != vmw_du_legacy); + int i, num_items; + SVGAGuestPtr ptr; + +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +index 4ccab07faff08..cb03c589ab226 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +@@ -868,6 +868,32 @@ vmw_stdu_connector_mode_valid(struct drm_connector *connector, + return MODE_OK; + } + ++/* ++ * Trigger a modeset if the X,Y position of the Screen Target changes. ++ * This is needed when multi-mon is cycled. The original Screen Target will have ++ * the same mode but its relative X,Y position in the topology will change. 
++ */ ++static int vmw_stdu_connector_atomic_check(struct drm_connector *conn, ++ struct drm_atomic_state *state) ++{ ++ struct drm_connector_state *conn_state; ++ struct vmw_screen_target_display_unit *du; ++ struct drm_crtc_state *new_crtc_state; ++ ++ conn_state = drm_atomic_get_connector_state(state, conn); ++ du = vmw_connector_to_stdu(conn); ++ ++ if (!conn_state->crtc) ++ return 0; ++ ++ new_crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc); ++ if (du->base.gui_x != du->base.set_gui_x || ++ du->base.gui_y != du->base.set_gui_y) ++ new_crtc_state->mode_changed = true; ++ ++ return 0; ++} ++ + static const struct drm_connector_funcs vmw_stdu_connector_funcs = { + .dpms = vmw_du_connector_dpms, + .detect = vmw_du_connector_detect, +@@ -882,7 +908,8 @@ static const struct drm_connector_funcs vmw_stdu_connector_funcs = { + static const struct + drm_connector_helper_funcs vmw_stdu_connector_helper_funcs = { + .get_modes = vmw_connector_get_modes, +- .mode_valid = vmw_stdu_connector_mode_valid ++ .mode_valid = vmw_stdu_connector_mode_valid, ++ .atomic_check = vmw_stdu_connector_atomic_check, + }; + + +diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c +index bdb578e0899f5..4b59687ff5d82 100644 +--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c ++++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c +@@ -288,12 +288,22 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata) + mp2_ops->start(privdata, info); + cl_data->sensor_sts[i] = amd_sfh_wait_for_response + (privdata, cl_data->sensor_idx[i], SENSOR_ENABLED); ++ ++ if (cl_data->sensor_sts[i] == SENSOR_ENABLED) ++ cl_data->is_any_sensor_enabled = true; ++ } ++ ++ if (!cl_data->is_any_sensor_enabled || ++ (mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0)) { ++ dev_warn(dev, "Failed to discover, sensors not enabled is %d\n", ++ cl_data->is_any_sensor_enabled); ++ rc = -EOPNOTSUPP; ++ goto cleanup; + } + + for (i = 0; i < cl_data->num_hid_devices; i++) { + cl_data->cur_hid_dev = i; + if (cl_data->sensor_sts[i] == SENSOR_ENABLED) { +- cl_data->is_any_sensor_enabled = true; + rc = amdtp_hid_probe(i, cl_data); + if (rc) + goto cleanup; +@@ -305,12 +315,6 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata) + cl_data->sensor_sts[i]); + } + +- if (!cl_data->is_any_sensor_enabled || +- (mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0)) { +- dev_warn(dev, "Failed to discover, sensors not enabled is %d\n", cl_data->is_any_sensor_enabled); +- rc = -EOPNOTSUPP; +- goto cleanup; +- } + schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP)); + return 0; + +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c +index 002cbaa16bd16..d2fe14ce423e2 100644 +--- a/drivers/hid/wacom_wac.c ++++ b/drivers/hid/wacom_wac.c +@@ -714,13 +714,12 @@ static int wacom_intuos_get_tool_type(int tool_id) + case 0x8e2: /* IntuosHT2 pen */ + case 0x022: + case 0x200: /* Pro Pen 3 */ +- case 0x04200: /* Pro Pen 3 */ + case 0x10842: /* MobileStudio Pro Pro Pen slim */ + case 0x14802: /* Intuos4/5 13HD/24HD Classic Pen */ + case 0x16802: /* Cintiq 13HD Pro Pen */ + case 0x18802: /* DTH2242 Pen */ + case 0x10802: /* Intuos4/5 13HD/24HD General Pen */ +- case 0x80842: /* Intuos Pro and Cintiq Pro 3D Pen */ ++ case 0x8842: /* Intuos Pro and Cintiq Pro 3D Pen */ + tool_type = BTN_TOOL_PEN; + break; + +diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c +index 4f5829b726a75..72fd2fe8f6fe8 100644 +--- 
a/drivers/leds/led-triggers.c ++++ b/drivers/leds/led-triggers.c +@@ -194,11 +194,24 @@ int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig) + spin_unlock(&trig->leddev_list_lock); + led_cdev->trigger = trig; + ++ /* ++ * Some activate() calls use led_trigger_event() to initialize ++ * the brightness of the LED for which the trigger is being set. ++ * Ensure the led_cdev is visible on trig->led_cdevs for this. ++ */ ++ synchronize_rcu(); ++ ++ /* ++ * If "set brightness to 0" is pending in workqueue, ++ * we don't want that to be reordered after ->activate() ++ */ ++ flush_work(&led_cdev->set_brightness_work); ++ ++ ret = 0; + if (trig->activate) + ret = trig->activate(led_cdev); + else +- ret = 0; +- ++ led_set_brightness(led_cdev, trig->brightness); + if (ret) + goto err_activate; + +@@ -269,19 +282,6 @@ void led_trigger_set_default(struct led_classdev *led_cdev) + } + EXPORT_SYMBOL_GPL(led_trigger_set_default); + +-void led_trigger_rename_static(const char *name, struct led_trigger *trig) +-{ +- /* new name must be on a temporary string to prevent races */ +- BUG_ON(name == trig->name); +- +- down_write(&triggers_list_lock); +- /* this assumes that trig->name was originaly allocated to +- * non constant storage */ +- strcpy((char *)trig->name, name); +- up_write(&triggers_list_lock); +-} +-EXPORT_SYMBOL_GPL(led_trigger_rename_static); +- + /* LED Trigger Interface */ + + int led_trigger_register(struct led_trigger *trig) +@@ -386,6 +386,8 @@ void led_trigger_event(struct led_trigger *trig, + if (!trig) + return; + ++ trig->brightness = brightness; ++ + rcu_read_lock(); + list_for_each_entry_rcu(led_cdev, &trig->led_cdevs, trig_list) + led_set_brightness(led_cdev, brightness); +diff --git a/drivers/leds/trigger/ledtrig-timer.c b/drivers/leds/trigger/ledtrig-timer.c +index b4688d1d9d2b2..1d213c999d40a 100644 +--- a/drivers/leds/trigger/ledtrig-timer.c ++++ b/drivers/leds/trigger/ledtrig-timer.c +@@ -110,11 +110,6 @@ static int timer_trig_activate(struct led_classdev *led_cdev) + led_cdev->flags &= ~LED_INIT_DEFAULT_TRIGGER; + } + +- /* +- * If "set brightness to 0" is pending in workqueue, we don't +- * want that to be reordered after blink_set() +- */ +- flush_work(&led_cdev->set_brightness_work); + led_blink_set(led_cdev, &led_cdev->blink_delay_on, + &led_cdev->blink_delay_off); + +diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c +index 24c914015973e..49b1fa9651161 100644 +--- a/drivers/net/ethernet/intel/ice/ice_txrx.c ++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c +@@ -456,7 +456,7 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring) + if (rx_ring->vsi->type == ICE_VSI_PF) + if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); +- rx_ring->xdp_prog = NULL; ++ WRITE_ONCE(rx_ring->xdp_prog, NULL); + if (rx_ring->xsk_pool) { + kfree(rx_ring->xdp_buf); + rx_ring->xdp_buf = NULL; +diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c +index f53566cb6bfbd..67511153081ae 100644 +--- a/drivers/net/ethernet/intel/ice/ice_xsk.c ++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c +@@ -52,10 +52,8 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx) + static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx) + { + ice_clean_tx_ring(vsi->tx_rings[q_idx]); +- if (ice_is_xdp_ena_vsi(vsi)) { +- synchronize_rcu(); ++ if (ice_is_xdp_ena_vsi(vsi)) + ice_clean_tx_ring(vsi->xdp_rings[q_idx]); +- } + ice_clean_rx_ring(vsi->rx_rings[q_idx]); + } 
+ +@@ -180,11 +178,12 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) + usleep_range(1000, 2000); + } + ++ synchronize_net(); ++ netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx)); ++ + ice_qvec_dis_irq(vsi, rx_ring, q_vector); + ice_qvec_toggle_napi(vsi, q_vector, false); + +- netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx)); +- + ice_fill_txq_meta(vsi, tx_ring, &txq_meta); + err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta); + if (err) +@@ -199,10 +198,8 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) + if (err) + return err; + } +- err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true); +- if (err) +- return err; + ++ ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, false); + ice_qp_clean_rings(vsi, q_idx); + ice_qp_reset_stats(vsi, q_idx); + +@@ -1068,6 +1065,10 @@ bool ice_xmit_zc(struct ice_tx_ring *xdp_ring) + + ice_clean_xdp_irq_zc(xdp_ring); + ++ if (!netif_carrier_ok(xdp_ring->vsi->netdev) || ++ !netif_running(xdp_ring->vsi->netdev)) ++ return true; ++ + budget = ICE_DESC_UNUSED(xdp_ring); + budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring)); + +@@ -1111,7 +1112,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, + struct ice_vsi *vsi = np->vsi; + struct ice_tx_ring *ring; + +- if (test_bit(ICE_VSI_DOWN, vsi->state)) ++ if (test_bit(ICE_VSI_DOWN, vsi->state) || !netif_carrier_ok(netdev)) + return -ENETDOWN; + + if (!ice_is_xdp_ena_vsi(vsi)) +diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c +index e83700ad7e622..d80bbcdeb93ed 100644 +--- a/drivers/net/ethernet/intel/igc/igc_main.c ++++ b/drivers/net/ethernet/intel/igc/igc_main.c +@@ -6208,21 +6208,6 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter, + size_t n; + int i; + +- switch (qopt->cmd) { +- case TAPRIO_CMD_REPLACE: +- break; +- case TAPRIO_CMD_DESTROY: +- return igc_tsn_clear_schedule(adapter); +- case TAPRIO_CMD_STATS: +- igc_taprio_stats(adapter->netdev, &qopt->stats); +- return 0; +- case TAPRIO_CMD_QUEUE_STATS: +- igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats); +- return 0; +- default: +- return -EOPNOTSUPP; +- } +- + if (qopt->base_time < 0) + return -ERANGE; + +@@ -6331,7 +6316,23 @@ static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter, + if (hw->mac.type != igc_i225) + return -EOPNOTSUPP; + +- err = igc_save_qbv_schedule(adapter, qopt); ++ switch (qopt->cmd) { ++ case TAPRIO_CMD_REPLACE: ++ err = igc_save_qbv_schedule(adapter, qopt); ++ break; ++ case TAPRIO_CMD_DESTROY: ++ err = igc_tsn_clear_schedule(adapter); ++ break; ++ case TAPRIO_CMD_STATS: ++ igc_taprio_stats(adapter->netdev, &qopt->stats); ++ return 0; ++ case TAPRIO_CMD_QUEUE_STATS: ++ igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats); ++ return 0; ++ default: ++ return -EOPNOTSUPP; ++ } ++ + if (err) + return err; + +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +index 05f4aa11b95c3..34051c9abd97d 100644 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +@@ -953,13 +953,13 @@ static void mvpp2_bm_pool_update_fc(struct mvpp2_port *port, + static void mvpp2_bm_pool_update_priv_fc(struct mvpp2 *priv, bool en) + { + struct mvpp2_port *port; +- int i; ++ int i, j; + + for (i = 0; i < priv->port_count; i++) { + port = priv->port_list[i]; + if (port->priv->percpu_pools) { +- for (i = 0; i < port->nrxqs; i++) +- mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[i], ++ 
for (j = 0; j < port->nrxqs; j++) ++ mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[j], + port->tx_fc & en); + } else { + mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc & en); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +index fadfa8b50bebe..8c4e3ecef5901 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +@@ -920,6 +920,7 @@ mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv, + mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, mh); + mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id); + err_mod_hdr: ++ *attr = *old_attr; + kfree(old_attr); + err_attr: + kvfree(spec); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c +index ce29e31721208..de83567aae791 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c +@@ -50,9 +50,10 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev) + MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap)) + caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD; + +- if ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) && +- MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) || +- MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level)) ++ if (IS_ENABLED(CONFIG_MLX5_CLS_ACT) && ++ ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) && ++ MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) || ++ MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level))) + caps |= MLX5_IPSEC_CAP_PRIO; + + if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +index 38263d5c98b34..50db127e6371b 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +@@ -1223,7 +1223,12 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, + if (!an_changes && link_modes == eproto.admin) + goto out; + +- mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext); ++ err = mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext); ++ if (err) { ++ netdev_err(priv->netdev, "%s: failed to set ptys reg: %d\n", __func__, err); ++ goto out; ++ } ++ + mlx5_toggle_port_link(mdev); + + out: +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +index 3a9cdf79403ae..6b17346aa4cef 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +@@ -206,6 +206,7 @@ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev) + static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unloaded) + { + struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; ++ struct devlink *devlink = priv_to_devlink(dev); + + /* if this is the driver that initiated the fw reset, devlink completed the reload */ + if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) { +@@ -217,9 +218,11 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unload + mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n"); + else + mlx5_load_one(dev, true); +- devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0, ++ devl_lock(devlink); ++ devlink_remote_reload_actions_performed(devlink, 0, + 
BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) | + BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE)); ++ devl_unlock(devlink); + } + } + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c +index 612e666ec2635..e2230c8f18152 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c +@@ -48,6 +48,7 @@ static struct mlx5_irq * + irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc) + { + struct irq_affinity_desc auto_desc = {}; ++ struct mlx5_irq *irq; + u32 irq_index; + int err; + +@@ -64,9 +65,12 @@ irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_de + else + cpu_get(pool, cpumask_first(&af_desc->mask)); + } +- return mlx5_irq_alloc(pool, irq_index, +- cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc, +- NULL); ++ irq = mlx5_irq_alloc(pool, irq_index, ++ cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc, ++ NULL); ++ if (IS_ERR(irq)) ++ xa_erase(&pool->irqs, irq_index); ++ return irq; + } + + /* Looking for the IRQ with the smallest refcount that fits req_mask. +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +index dfc2ba6f780a2..18cf756bad8cc 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +@@ -1512,7 +1512,7 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev, + goto unlock; + + for (i = 0; i < ldev->ports; i++) { +- if (ldev->pf[MLX5_LAG_P1].netdev == slave) { ++ if (ldev->pf[i].netdev == slave) { + port = i; + break; + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c +index 2237b3d01e0e5..11f11248feb8b 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c +@@ -2130,7 +2130,6 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev) + /* Panic tear down fw command will stop the PCI bus communication + * with the HCA, so the health poll is no longer needed. + */ +- mlx5_drain_health_wq(dev); + mlx5_stop_health_poll(dev, false); + + ret = mlx5_cmd_fast_teardown_hca(dev); +@@ -2165,6 +2164,7 @@ static void shutdown(struct pci_dev *pdev) + + mlx5_core_info(dev, "Shutdown was called\n"); + set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state); ++ mlx5_drain_health_wq(dev); + err = mlx5_try_fast_unload(dev); + if (err) + mlx5_unload_one(dev, false); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c +index 30218f37d5285..2028acbe85ca2 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c +@@ -90,6 +90,7 @@ static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev) + struct mlx5_core_dev *mdev = sf_dev->mdev; + + set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state); ++ mlx5_drain_health_wq(mdev); + mlx5_unload_one(mdev, false); + } + +diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c +index d759f3373b175..8a732edac15a0 100644 +--- a/drivers/net/ethernet/realtek/r8169_main.c ++++ b/drivers/net/ethernet/realtek/r8169_main.c +@@ -4256,7 +4256,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, + if (unlikely(!rtl_tx_slots_avail(tp))) { + if (net_ratelimit()) + netdev_err(dev, "BUG! 
Tx Ring full when queue awake!\n"); +- goto err_stop_0; ++ netif_stop_queue(dev); ++ return NETDEV_TX_BUSY; + } + + opts[1] = rtl8169_tx_vlan_tag(skb); +@@ -4312,11 +4313,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; +- +-err_stop_0: +- netif_stop_queue(dev); +- dev->stats.tx_dropped++; +- return NETDEV_TX_BUSY; + } + + static unsigned int rtl_last_frag_len(struct sk_buff *skb) +diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +index 3297aff969c80..11e08cb8d3c3e 100644 +--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c ++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +@@ -1826,9 +1826,9 @@ static void axienet_dma_err_handler(struct work_struct *work) + ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); + axienet_set_mac_address(ndev, NULL); + axienet_set_multicast_list(ndev); +- axienet_setoptions(ndev, lp->options); + napi_enable(&lp->napi_rx); + napi_enable(&lp->napi_tx); ++ axienet_setoptions(ndev, lp->options); + } + + /** +diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c +index 029c82f88ee38..9a0432145645f 100644 +--- a/drivers/net/phy/micrel.c ++++ b/drivers/net/phy/micrel.c +@@ -1293,6 +1293,8 @@ static int ksz9131_config_init(struct phy_device *phydev) + const struct device *dev_walker; + int ret; + ++ phydev->mdix_ctrl = ETH_TP_MDI_AUTO; ++ + dev_walker = &phydev->mdio.dev; + do { + of_node = dev_walker->of_node; +@@ -1342,28 +1344,30 @@ static int ksz9131_config_init(struct phy_device *phydev) + #define MII_KSZ9131_AUTO_MDIX 0x1C + #define MII_KSZ9131_AUTO_MDI_SET BIT(7) + #define MII_KSZ9131_AUTO_MDIX_SWAP_OFF BIT(6) ++#define MII_KSZ9131_DIG_AXAN_STS 0x14 ++#define MII_KSZ9131_DIG_AXAN_STS_LINK_DET BIT(14) ++#define MII_KSZ9131_DIG_AXAN_STS_A_SELECT BIT(12) + + static int ksz9131_mdix_update(struct phy_device *phydev) + { + int ret; + +- ret = phy_read(phydev, MII_KSZ9131_AUTO_MDIX); +- if (ret < 0) +- return ret; +- +- if (ret & MII_KSZ9131_AUTO_MDIX_SWAP_OFF) { +- if (ret & MII_KSZ9131_AUTO_MDI_SET) +- phydev->mdix_ctrl = ETH_TP_MDI; +- else +- phydev->mdix_ctrl = ETH_TP_MDI_X; ++ if (phydev->mdix_ctrl != ETH_TP_MDI_AUTO) { ++ phydev->mdix = phydev->mdix_ctrl; + } else { +- phydev->mdix_ctrl = ETH_TP_MDI_AUTO; +- } ++ ret = phy_read(phydev, MII_KSZ9131_DIG_AXAN_STS); ++ if (ret < 0) ++ return ret; + +- if (ret & MII_KSZ9131_AUTO_MDI_SET) +- phydev->mdix = ETH_TP_MDI; +- else +- phydev->mdix = ETH_TP_MDI_X; ++ if (ret & MII_KSZ9131_DIG_AXAN_STS_LINK_DET) { ++ if (ret & MII_KSZ9131_DIG_AXAN_STS_A_SELECT) ++ phydev->mdix = ETH_TP_MDI; ++ else ++ phydev->mdix = ETH_TP_MDI_X; ++ } else { ++ phydev->mdix = ETH_TP_MDI_INVALID; ++ } ++ } + + return 0; + } +diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c +index 337899c69738e..2604d9663a5b2 100644 +--- a/drivers/net/phy/realtek.c ++++ b/drivers/net/phy/realtek.c +@@ -1083,6 +1083,13 @@ static struct phy_driver realtek_drvs[] = { + .handle_interrupt = genphy_handle_interrupt_no_ack, + .suspend = genphy_suspend, + .resume = genphy_resume, ++ }, { ++ PHY_ID_MATCH_EXACT(0x001cc960), ++ .name = "RTL8366S Gigabit Ethernet", ++ .suspend = genphy_suspend, ++ .resume = genphy_resume, ++ .read_mmd = genphy_read_mmd_unsupported, ++ .write_mmd = genphy_write_mmd_unsupported, + }, + }; + +diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c +index 0a662e42ed965..cb7d2f798fb43 100644 +--- a/drivers/net/usb/sr9700.c ++++ 
b/drivers/net/usb/sr9700.c +@@ -179,6 +179,7 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc) + struct usbnet *dev = netdev_priv(netdev); + __le16 res; + int rc = 0; ++ int err; + + if (phy_id) { + netdev_dbg(netdev, "Only internal phy supported\n"); +@@ -189,11 +190,17 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc) + if (loc == MII_BMSR) { + u8 value; + +- sr_read_reg(dev, SR_NSR, &value); ++ err = sr_read_reg(dev, SR_NSR, &value); ++ if (err < 0) ++ return err; ++ + if (value & NSR_LINKST) + rc = 1; + } +- sr_share_read_word(dev, 1, loc, &res); ++ err = sr_share_read_word(dev, 1, loc, &res); ++ if (err < 0) ++ return err; ++ + if (rc == 1) + res = le16_to_cpu(res) | BMSR_LSTATUS; + else +diff --git a/drivers/pci/search.c b/drivers/pci/search.c +index b4c138a6ec025..53840634fbfc2 100644 +--- a/drivers/pci/search.c ++++ b/drivers/pci/search.c +@@ -363,6 +363,37 @@ struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from) + } + EXPORT_SYMBOL(pci_get_class); + ++/** ++ * pci_get_base_class - searching for a PCI device by matching against the base class code only ++ * @class: search for a PCI device with this base class code ++ * @from: Previous PCI device found in search, or %NULL for new search. ++ * ++ * Iterates through the list of known PCI devices. If a PCI device is found ++ * with a matching base class code, the reference count to the device is ++ * incremented. See pci_match_one_device() to figure out how does this works. ++ * A new search is initiated by passing %NULL as the @from argument. ++ * Otherwise if @from is not %NULL, searches continue from next device on the ++ * global list. The reference count for @from is always decremented if it is ++ * not %NULL. ++ * ++ * Returns: ++ * A pointer to a matched PCI device, %NULL Otherwise. ++ */ ++struct pci_dev *pci_get_base_class(unsigned int class, struct pci_dev *from) ++{ ++ struct pci_device_id id = { ++ .vendor = PCI_ANY_ID, ++ .device = PCI_ANY_ID, ++ .subvendor = PCI_ANY_ID, ++ .subdevice = PCI_ANY_ID, ++ .class_mask = 0xFF0000, ++ .class = class << 16, ++ }; ++ ++ return pci_get_dev_by_id(&id, from); ++} ++EXPORT_SYMBOL(pci_get_base_class); ++ + /** + * pci_dev_present - Returns 1 if device matching the device list is present, 0 if not. + * @ids: A pointer to a null terminated list of struct pci_device_id structures +diff --git a/drivers/perf/fsl_imx9_ddr_perf.c b/drivers/perf/fsl_imx9_ddr_perf.c +index 5cf770a1bc312..4f6eade522024 100644 +--- a/drivers/perf/fsl_imx9_ddr_perf.c ++++ b/drivers/perf/fsl_imx9_ddr_perf.c +@@ -476,12 +476,12 @@ static int ddr_perf_event_add(struct perf_event *event, int flags) + hwc->idx = counter; + hwc->state |= PERF_HES_STOPPED; + +- if (flags & PERF_EF_START) +- ddr_perf_event_start(event, flags); +- + /* read trans, write trans, read beat */ + ddr_perf_monitor_config(pmu, cfg, cfg1, cfg2); + ++ if (flags & PERF_EF_START) ++ ddr_perf_event_start(event, flags); ++ + return 0; + } + +diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c +index ae16ecb15f2d9..901da688ea3f8 100644 +--- a/drivers/perf/riscv_pmu_sbi.c ++++ b/drivers/perf/riscv_pmu_sbi.c +@@ -355,7 +355,7 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event) + * but not in the user access mode as we want to use the other counters + * that support sampling/filtering. 
+ */ +- if (hwc->flags & PERF_EVENT_FLAG_LEGACY) { ++ if ((hwc->flags & PERF_EVENT_FLAG_LEGACY) && (event->attr.type == PERF_TYPE_HARDWARE)) { + if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES) { + cflags |= SBI_PMU_CFG_FLAG_SKIP_MATCH; + cmask = 1; +diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c +index 475a6dd72db6b..809fabef3b44a 100644 +--- a/drivers/platform/chrome/cros_ec_proto.c ++++ b/drivers/platform/chrome/cros_ec_proto.c +@@ -805,9 +805,11 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev, + if (ret == -ENOPROTOOPT) { + dev_dbg(ec_dev->dev, + "GET_NEXT_EVENT returned invalid version error.\n"); ++ mutex_lock(&ec_dev->lock); + ret = cros_ec_get_host_command_version_mask(ec_dev, + EC_CMD_GET_NEXT_EVENT, + &ver_mask); ++ mutex_unlock(&ec_dev->lock); + if (ret < 0 || ver_mask == 0) + /* + * Do not change the MKBP supported version if we can't +diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c +index 3acc9288b3105..3b1030fc4fbfe 100644 +--- a/drivers/thermal/broadcom/bcm2835_thermal.c ++++ b/drivers/thermal/broadcom/bcm2835_thermal.c +@@ -185,7 +185,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev) + return err; + } + +- data->clk = devm_clk_get(&pdev->dev, NULL); ++ data->clk = devm_clk_get_enabled(&pdev->dev, NULL); + if (IS_ERR(data->clk)) { + err = PTR_ERR(data->clk); + if (err != -EPROBE_DEFER) +@@ -193,10 +193,6 @@ static int bcm2835_thermal_probe(struct platform_device *pdev) + return err; + } + +- err = clk_prepare_enable(data->clk); +- if (err) +- return err; +- + rate = clk_get_rate(data->clk); + if ((rate < 1920000) || (rate > 5000000)) + dev_warn(&pdev->dev, +@@ -211,7 +207,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev) + dev_err(&pdev->dev, + "Failed to register the thermal device: %d\n", + err); +- goto err_clk; ++ return err; + } + + /* +@@ -236,7 +232,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev) + dev_err(&pdev->dev, + "Not able to read trip_temp: %d\n", + err); +- goto err_tz; ++ return err; + } + + /* set bandgap reference voltage and enable voltage regulator */ +@@ -269,32 +265,23 @@ static int bcm2835_thermal_probe(struct platform_device *pdev) + */ + err = thermal_add_hwmon_sysfs(tz); + if (err) +- goto err_tz; ++ return err; + + bcm2835_thermal_debugfs(pdev); + + return 0; +-err_tz: +- devm_thermal_of_zone_unregister(&pdev->dev, tz); +-err_clk: +- clk_disable_unprepare(data->clk); +- +- return err; + } + +-static int bcm2835_thermal_remove(struct platform_device *pdev) ++static void bcm2835_thermal_remove(struct platform_device *pdev) + { + struct bcm2835_thermal_data *data = platform_get_drvdata(pdev); + + debugfs_remove_recursive(data->debugfsdir); +- clk_disable_unprepare(data->clk); +- +- return 0; + } + + static struct platform_driver bcm2835_thermal_driver = { + .probe = bcm2835_thermal_probe, +- .remove = bcm2835_thermal_remove, ++ .remove_new = bcm2835_thermal_remove, + .driver = { + .name = "bcm2835_thermal", + .of_match_table = bcm2835_thermal_of_match_table, +diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig +index b694d7669d320..1eb755a94940a 100644 +--- a/drivers/video/Kconfig ++++ b/drivers/video/Kconfig +@@ -11,6 +11,10 @@ config APERTURE_HELPERS + Support tracking and hand-over of aperture ownership. Required + by graphics drivers for firmware-provided framebuffers. 
+ ++config SCREEN_INFO ++ bool ++ default n ++ + config STI_CORE + bool + depends on PARISC +diff --git a/drivers/video/Makefile b/drivers/video/Makefile +index 6bbc039508995..6bbf87c1b579e 100644 +--- a/drivers/video/Makefile ++++ b/drivers/video/Makefile +@@ -1,12 +1,16 @@ + # SPDX-License-Identifier: GPL-2.0 + + obj-$(CONFIG_APERTURE_HELPERS) += aperture.o ++obj-$(CONFIG_SCREEN_INFO) += screen_info.o + obj-$(CONFIG_STI_CORE) += sticore.o + obj-$(CONFIG_VGASTATE) += vgastate.o + obj-$(CONFIG_VIDEO_CMDLINE) += cmdline.o + obj-$(CONFIG_VIDEO_NOMODESET) += nomodeset.o + obj-$(CONFIG_HDMI) += hdmi.o + ++screen_info-y := screen_info_generic.o ++screen_info-$(CONFIG_PCI) += screen_info_pci.o ++ + obj-$(CONFIG_VT) += console/ + obj-$(CONFIG_FB_STI) += console/ + obj-$(CONFIG_LOGO) += logo/ +diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c +index c0edceea0a793..a21581b40256c 100644 +--- a/drivers/video/fbdev/vesafb.c ++++ b/drivers/video/fbdev/vesafb.c +@@ -243,6 +243,7 @@ static int vesafb_setup(char *options) + + static int vesafb_probe(struct platform_device *dev) + { ++ struct screen_info *si = &screen_info; + struct fb_info *info; + struct vesafb_par *par; + int i, err; +@@ -255,17 +256,17 @@ static int vesafb_probe(struct platform_device *dev) + fb_get_options("vesafb", &option); + vesafb_setup(option); + +- if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB) ++ if (si->orig_video_isVGA != VIDEO_TYPE_VLFB) + return -ENODEV; + +- vga_compat = (screen_info.capabilities & 2) ? 0 : 1; +- vesafb_fix.smem_start = screen_info.lfb_base; +- vesafb_defined.bits_per_pixel = screen_info.lfb_depth; ++ vga_compat = !__screen_info_vbe_mode_nonvga(si); ++ vesafb_fix.smem_start = si->lfb_base; ++ vesafb_defined.bits_per_pixel = si->lfb_depth; + if (15 == vesafb_defined.bits_per_pixel) + vesafb_defined.bits_per_pixel = 16; +- vesafb_defined.xres = screen_info.lfb_width; +- vesafb_defined.yres = screen_info.lfb_height; +- vesafb_fix.line_length = screen_info.lfb_linelength; ++ vesafb_defined.xres = si->lfb_width; ++ vesafb_defined.yres = si->lfb_height; ++ vesafb_fix.line_length = si->lfb_linelength; + vesafb_fix.visual = (vesafb_defined.bits_per_pixel == 8) ? + FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR; + +@@ -277,7 +278,7 @@ static int vesafb_probe(struct platform_device *dev) + /* size_total -- all video memory we have. Used for mtrr + * entries, resource allocation and bounds + * checking. 
*/ +- size_total = screen_info.lfb_size * 65536; ++ size_total = si->lfb_size * 65536; + if (vram_total) + size_total = vram_total * 1024 * 1024; + if (size_total < size_vmode) +@@ -297,7 +298,7 @@ static int vesafb_probe(struct platform_device *dev) + vesafb_fix.smem_len = size_remap; + + #ifndef __i386__ +- screen_info.vesapm_seg = 0; ++ si->vesapm_seg = 0; + #endif + + if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) { +@@ -317,23 +318,26 @@ static int vesafb_probe(struct platform_device *dev) + par = info->par; + info->pseudo_palette = par->pseudo_palette; + +- par->base = screen_info.lfb_base; ++ par->base = si->lfb_base; + par->size = size_total; + + printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n", +- vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages); ++ vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, ++ vesafb_fix.line_length, si->pages); + +- if (screen_info.vesapm_seg) { ++ if (si->vesapm_seg) { + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n", +- screen_info.vesapm_seg,screen_info.vesapm_off); ++ si->vesapm_seg, si->vesapm_off); + } + +- if (screen_info.vesapm_seg < 0xc000) ++ if (si->vesapm_seg < 0xc000) + ypan = pmi_setpal = 0; /* not available or some DOS TSR ... */ + + if (ypan || pmi_setpal) { ++ unsigned long pmi_phys; + unsigned short *pmi_base; +- pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off); ++ pmi_phys = ((unsigned long)si->vesapm_seg << 4) + si->vesapm_off; ++ pmi_base = (unsigned short *)phys_to_virt(pmi_phys); + pmi_start = (void*)((char*)pmi_base + pmi_base[1]); + pmi_pal = (void*)((char*)pmi_base + pmi_base[2]); + printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal); +@@ -377,14 +381,14 @@ static int vesafb_probe(struct platform_device *dev) + vesafb_defined.left_margin = (vesafb_defined.xres / 8) & 0xf8; + vesafb_defined.hsync_len = (vesafb_defined.xres / 8) & 0xf8; + +- vesafb_defined.red.offset = screen_info.red_pos; +- vesafb_defined.red.length = screen_info.red_size; +- vesafb_defined.green.offset = screen_info.green_pos; +- vesafb_defined.green.length = screen_info.green_size; +- vesafb_defined.blue.offset = screen_info.blue_pos; +- vesafb_defined.blue.length = screen_info.blue_size; +- vesafb_defined.transp.offset = screen_info.rsvd_pos; +- vesafb_defined.transp.length = screen_info.rsvd_size; ++ vesafb_defined.red.offset = si->red_pos; ++ vesafb_defined.red.length = si->red_size; ++ vesafb_defined.green.offset = si->green_pos; ++ vesafb_defined.green.length = si->green_size; ++ vesafb_defined.blue.offset = si->blue_pos; ++ vesafb_defined.blue.length = si->blue_size; ++ vesafb_defined.transp.offset = si->rsvd_pos; ++ vesafb_defined.transp.length = si->rsvd_size; + + if (vesafb_defined.bits_per_pixel <= 8) { + depth = vesafb_defined.green.length; +@@ -399,14 +403,14 @@ static int vesafb_probe(struct platform_device *dev) + (vesafb_defined.bits_per_pixel > 8) ? + "Truecolor" : (vga_compat || pmi_setpal) ? 
+ "Pseudocolor" : "Static Pseudocolor", +- screen_info.rsvd_size, +- screen_info.red_size, +- screen_info.green_size, +- screen_info.blue_size, +- screen_info.rsvd_pos, +- screen_info.red_pos, +- screen_info.green_pos, +- screen_info.blue_pos); ++ si->rsvd_size, ++ si->red_size, ++ si->green_size, ++ si->blue_size, ++ si->rsvd_pos, ++ si->red_pos, ++ si->green_pos, ++ si->blue_pos); + + vesafb_fix.ypanstep = ypan ? 1 : 0; + vesafb_fix.ywrapstep = (ypan>1) ? 1 : 0; +diff --git a/drivers/video/screen_info_generic.c b/drivers/video/screen_info_generic.c +new file mode 100644 +index 0000000000000..64117c6367abb +--- /dev/null ++++ b/drivers/video/screen_info_generic.c +@@ -0,0 +1,146 @@ ++// SPDX-License-Identifier: GPL-2.0 ++ ++#include ++#include ++#include ++#include ++ ++static void resource_init_named(struct resource *r, ++ resource_size_t start, resource_size_t size, ++ const char *name, unsigned int flags) ++{ ++ memset(r, 0, sizeof(*r)); ++ ++ r->start = start; ++ r->end = start + size - 1; ++ r->name = name; ++ r->flags = flags; ++} ++ ++static void resource_init_io_named(struct resource *r, ++ resource_size_t start, resource_size_t size, ++ const char *name) ++{ ++ resource_init_named(r, start, size, name, IORESOURCE_IO); ++} ++ ++static void resource_init_mem_named(struct resource *r, ++ resource_size_t start, resource_size_t size, ++ const char *name) ++{ ++ resource_init_named(r, start, size, name, IORESOURCE_MEM); ++} ++ ++static inline bool __screen_info_has_ega_gfx(unsigned int mode) ++{ ++ switch (mode) { ++ case 0x0d: /* 320x200-4 */ ++ case 0x0e: /* 640x200-4 */ ++ case 0x0f: /* 640x350-1 */ ++ case 0x10: /* 640x350-4 */ ++ return true; ++ default: ++ return false; ++ } ++} ++ ++static inline bool __screen_info_has_vga_gfx(unsigned int mode) ++{ ++ switch (mode) { ++ case 0x10: /* 640x480-1 */ ++ case 0x12: /* 640x480-4 */ ++ case 0x13: /* 320-200-8 */ ++ case 0x6a: /* 800x600-4 (VESA) */ ++ return true; ++ default: ++ return __screen_info_has_ega_gfx(mode); ++ } ++} ++ ++/** ++ * screen_info_resources() - Get resources from screen_info structure ++ * @si: the screen_info ++ * @r: pointer to an array of resource structures ++ * @num: number of elements in @r: ++ * ++ * Returns: ++ * The number of resources stored in @r on success, or a negative errno code otherwise. ++ * ++ * A call to screen_info_resources() returns the resources consumed by the ++ * screen_info's device or framebuffer. The result is stored in the caller-supplied ++ * array @r with up to @num elements. The function returns the number of ++ * initialized elements. 
++ */ ++ssize_t screen_info_resources(const struct screen_info *si, struct resource *r, size_t num) ++{ ++ struct resource *pos = r; ++ unsigned int type = screen_info_video_type(si); ++ u64 base, size; ++ ++ switch (type) { ++ case VIDEO_TYPE_MDA: ++ if (num > 0) ++ resource_init_io_named(pos++, 0x3b0, 12, "mda"); ++ if (num > 1) ++ resource_init_io_named(pos++, 0x3bf, 0x01, "mda"); ++ if (num > 2) ++ resource_init_mem_named(pos++, 0xb0000, 0x2000, "mda"); ++ break; ++ case VIDEO_TYPE_CGA: ++ if (num > 0) ++ resource_init_io_named(pos++, 0x3d4, 0x02, "cga"); ++ if (num > 1) ++ resource_init_mem_named(pos++, 0xb8000, 0x2000, "cga"); ++ break; ++ case VIDEO_TYPE_EGAM: ++ if (num > 0) ++ resource_init_io_named(pos++, 0x3bf, 0x10, "ega"); ++ if (num > 1) ++ resource_init_mem_named(pos++, 0xb0000, 0x8000, "ega"); ++ break; ++ case VIDEO_TYPE_EGAC: ++ if (num > 0) ++ resource_init_io_named(pos++, 0x3c0, 0x20, "ega"); ++ if (num > 1) { ++ if (__screen_info_has_ega_gfx(si->orig_video_mode)) ++ resource_init_mem_named(pos++, 0xa0000, 0x10000, "ega"); ++ else ++ resource_init_mem_named(pos++, 0xb8000, 0x8000, "ega"); ++ } ++ break; ++ case VIDEO_TYPE_VGAC: ++ if (num > 0) ++ resource_init_io_named(pos++, 0x3c0, 0x20, "vga+"); ++ if (num > 1) { ++ if (__screen_info_has_vga_gfx(si->orig_video_mode)) ++ resource_init_mem_named(pos++, 0xa0000, 0x10000, "vga+"); ++ else ++ resource_init_mem_named(pos++, 0xb8000, 0x8000, "vga+"); ++ } ++ break; ++ case VIDEO_TYPE_VLFB: ++ case VIDEO_TYPE_EFI: ++ base = __screen_info_lfb_base(si); ++ if (!base) ++ break; ++ size = __screen_info_lfb_size(si, type); ++ if (!size) ++ break; ++ if (num > 0) ++ resource_init_mem_named(pos++, base, size, "lfb"); ++ break; ++ case VIDEO_TYPE_PICA_S3: ++ case VIDEO_TYPE_MIPS_G364: ++ case VIDEO_TYPE_SGI: ++ case VIDEO_TYPE_TGAC: ++ case VIDEO_TYPE_SUN: ++ case VIDEO_TYPE_SUNPCI: ++ case VIDEO_TYPE_PMAC: ++ default: ++ /* not supported */ ++ return -EINVAL; ++ } ++ ++ return pos - r; ++} ++EXPORT_SYMBOL(screen_info_resources); +diff --git a/drivers/video/screen_info_pci.c b/drivers/video/screen_info_pci.c +new file mode 100644 +index 0000000000000..6c58335171410 +--- /dev/null ++++ b/drivers/video/screen_info_pci.c +@@ -0,0 +1,136 @@ ++// SPDX-License-Identifier: GPL-2.0 ++ ++#include ++#include ++#include ++#include ++ ++static struct pci_dev *screen_info_lfb_pdev; ++static size_t screen_info_lfb_bar; ++static resource_size_t screen_info_lfb_offset; ++static struct resource screen_info_lfb_res = DEFINE_RES_MEM(0, 0); ++ ++static bool __screen_info_relocation_is_valid(const struct screen_info *si, struct resource *pr) ++{ ++ u64 size = __screen_info_lfb_size(si, screen_info_video_type(si)); ++ ++ if (screen_info_lfb_offset > resource_size(pr)) ++ return false; ++ if (size > resource_size(pr)) ++ return false; ++ if (resource_size(pr) - size < screen_info_lfb_offset) ++ return false; ++ ++ return true; ++} ++ ++void screen_info_apply_fixups(void) ++{ ++ struct screen_info *si = &screen_info; ++ ++ if (screen_info_lfb_pdev) { ++ struct resource *pr = &screen_info_lfb_pdev->resource[screen_info_lfb_bar]; ++ ++ if (pr->start != screen_info_lfb_res.start) { ++ if (__screen_info_relocation_is_valid(si, pr)) { ++ /* ++ * Only update base if we have an actual ++ * relocation to a valid I/O range. 
++ */ ++ __screen_info_set_lfb_base(si, pr->start + screen_info_lfb_offset); ++ pr_info("Relocating firmware framebuffer to offset %pa[d] within %pr\n", ++ &screen_info_lfb_offset, pr); ++ } else { ++ pr_warn("Invalid relocating, disabling firmware framebuffer\n"); ++ } ++ } ++ } ++} ++ ++static void screen_info_fixup_lfb(struct pci_dev *pdev) ++{ ++ unsigned int type; ++ struct resource res[SCREEN_INFO_MAX_RESOURCES]; ++ size_t i, numres; ++ int ret; ++ const struct screen_info *si = &screen_info; ++ ++ if (screen_info_lfb_pdev) ++ return; // already found ++ ++ type = screen_info_video_type(si); ++ if (type != VIDEO_TYPE_EFI) ++ return; // only applies to EFI ++ ++ ret = screen_info_resources(si, res, ARRAY_SIZE(res)); ++ if (ret < 0) ++ return; ++ numres = ret; ++ ++ for (i = 0; i < numres; ++i) { ++ struct resource *r = &res[i]; ++ const struct resource *pr; ++ ++ if (!(r->flags & IORESOURCE_MEM)) ++ continue; ++ pr = pci_find_resource(pdev, r); ++ if (!pr) ++ continue; ++ ++ /* ++ * We've found a PCI device with the framebuffer ++ * resource. Store away the parameters to track ++ * relocation of the framebuffer aperture. ++ */ ++ screen_info_lfb_pdev = pdev; ++ screen_info_lfb_bar = pr - pdev->resource; ++ screen_info_lfb_offset = r->start - pr->start; ++ memcpy(&screen_info_lfb_res, r, sizeof(screen_info_lfb_res)); ++ } ++} ++DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY, 16, ++ screen_info_fixup_lfb); ++ ++static struct pci_dev *__screen_info_pci_dev(struct resource *res) ++{ ++ struct pci_dev *pdev = NULL; ++ const struct resource *r = NULL; ++ ++ if (!(res->flags & IORESOURCE_MEM)) ++ return NULL; ++ ++ while (!r && (pdev = pci_get_base_class(PCI_BASE_CLASS_DISPLAY, pdev))) { ++ r = pci_find_resource(pdev, res); ++ } ++ ++ return pdev; ++} ++ ++/** ++ * screen_info_pci_dev() - Return PCI parent device that contains screen_info's framebuffer ++ * @si: the screen_info ++ * ++ * Returns: ++ * The screen_info's parent device or NULL on success, or a pointer-encoded ++ * errno value otherwise. The value NULL is not an error. It signals that no ++ * PCI device has been found. 
++ */ ++struct pci_dev *screen_info_pci_dev(const struct screen_info *si) ++{ ++ struct resource res[SCREEN_INFO_MAX_RESOURCES]; ++ ssize_t i, numres; ++ ++ numres = screen_info_resources(si, res, ARRAY_SIZE(res)); ++ if (numres < 0) ++ return ERR_PTR(numres); ++ ++ for (i = 0; i < numres; ++i) { ++ struct pci_dev *pdev = __screen_info_pci_dev(&res[i]); ++ ++ if (pdev) ++ return pdev; ++ } ++ ++ return NULL; ++} ++EXPORT_SYMBOL(screen_info_pci_dev); +diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c +index dd065349fae3a..4e999e1c14075 100644 +--- a/fs/btrfs/block-group.c ++++ b/fs/btrfs/block-group.c +@@ -1214,8 +1214,8 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, + block_group->space_info->total_bytes -= block_group->length; + block_group->space_info->bytes_readonly -= + (block_group->length - block_group->zone_unusable); +- block_group->space_info->bytes_zone_unusable -= +- block_group->zone_unusable; ++ btrfs_space_info_update_bytes_zone_unusable(fs_info, block_group->space_info, ++ -block_group->zone_unusable); + block_group->space_info->disk_total -= block_group->length * factor; + + spin_unlock(&block_group->space_info->lock); +@@ -1399,7 +1399,8 @@ static int inc_block_group_ro(struct btrfs_block_group *cache, int force) + if (btrfs_is_zoned(cache->fs_info)) { + /* Migrate zone_unusable bytes to readonly */ + sinfo->bytes_readonly += cache->zone_unusable; +- sinfo->bytes_zone_unusable -= cache->zone_unusable; ++ btrfs_space_info_update_bytes_zone_unusable(cache->fs_info, sinfo, ++ -cache->zone_unusable); + cache->zone_unusable = 0; + } + cache->ro++; +@@ -3023,9 +3024,11 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group *cache) + if (btrfs_is_zoned(cache->fs_info)) { + /* Migrate zone_unusable bytes back */ + cache->zone_unusable = +- (cache->alloc_offset - cache->used) + ++ (cache->alloc_offset - cache->used - cache->pinned - ++ cache->reserved) + + (cache->length - cache->zone_capacity); +- sinfo->bytes_zone_unusable += cache->zone_unusable; ++ btrfs_space_info_update_bytes_zone_unusable(cache->fs_info, sinfo, ++ cache->zone_unusable); + sinfo->bytes_readonly -= cache->zone_unusable; + } + num_bytes = cache->length - cache->reserved - +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c +index b89b558b15926..c6ecfd05e1db9 100644 +--- a/fs/btrfs/extent-tree.c ++++ b/fs/btrfs/extent-tree.c +@@ -2749,7 +2749,8 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info, + readonly = true; + } else if (btrfs_is_zoned(fs_info)) { + /* Need reset before reusing in a zoned block group */ +- space_info->bytes_zone_unusable += len; ++ btrfs_space_info_update_bytes_zone_unusable(fs_info, space_info, ++ len); + readonly = true; + } + spin_unlock(&cache->lock); +diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c +index dcfc0425115e9..f59e599766662 100644 +--- a/fs/btrfs/free-space-cache.c ++++ b/fs/btrfs/free-space-cache.c +@@ -2721,8 +2721,10 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group, + * If the block group is read-only, we should account freed space into + * bytes_readonly. 
+ */ +- if (!block_group->ro) ++ if (!block_group->ro) { + block_group->zone_unusable += to_unusable; ++ WARN_ON(block_group->zone_unusable > block_group->length); ++ } + spin_unlock(&ctl->tree_lock); + if (!used) { + spin_lock(&block_group->lock); +diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c +index 3f7a9605e2d3a..581bdd709ee0d 100644 +--- a/fs/btrfs/space-info.c ++++ b/fs/btrfs/space-info.c +@@ -312,7 +312,7 @@ void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info, + found->bytes_used += block_group->used; + found->disk_used += block_group->used * factor; + found->bytes_readonly += block_group->bytes_super; +- found->bytes_zone_unusable += block_group->zone_unusable; ++ btrfs_space_info_update_bytes_zone_unusable(info, found, block_group->zone_unusable); + if (block_group->length > 0) + found->full = 0; + btrfs_try_granting_tickets(info, found); +@@ -524,8 +524,7 @@ void btrfs_dump_space_info(struct btrfs_fs_info *fs_info, + + spin_lock(&cache->lock); + avail = cache->length - cache->used - cache->pinned - +- cache->reserved - cache->delalloc_bytes - +- cache->bytes_super - cache->zone_unusable; ++ cache->reserved - cache->bytes_super - cache->zone_unusable; + btrfs_info(fs_info, + "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu delalloc %llu super %llu zone_unusable (%llu bytes available) %s", + cache->start, cache->length, cache->used, cache->pinned, +diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h +index 0bb9d14e60a82..08a3bd10addcf 100644 +--- a/fs/btrfs/space-info.h ++++ b/fs/btrfs/space-info.h +@@ -197,6 +197,7 @@ btrfs_space_info_update_##name(struct btrfs_fs_info *fs_info, \ + + DECLARE_SPACE_INFO_UPDATE(bytes_may_use, "space_info"); + DECLARE_SPACE_INFO_UPDATE(bytes_pinned, "pinned"); ++DECLARE_SPACE_INFO_UPDATE(bytes_zone_unusable, "zone_unusable"); + + int btrfs_init_space_info(struct btrfs_fs_info *fs_info); + void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info, +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index d5eb8d44c6c81..cef119a2476bb 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -453,6 +453,35 @@ static void ext4_map_blocks_es_recheck(handle_t *handle, + } + #endif /* ES_AGGRESSIVE_TEST */ + ++static int ext4_map_query_blocks(handle_t *handle, struct inode *inode, ++ struct ext4_map_blocks *map) ++{ ++ unsigned int status; ++ int retval; ++ ++ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) ++ retval = ext4_ext_map_blocks(handle, inode, map, 0); ++ else ++ retval = ext4_ind_map_blocks(handle, inode, map, 0); ++ ++ if (retval <= 0) ++ return retval; ++ ++ if (unlikely(retval != map->m_len)) { ++ ext4_warning(inode->i_sb, ++ "ES len assertion failed for inode " ++ "%lu: retval %d != map->m_len %d", ++ inode->i_ino, retval, map->m_len); ++ WARN_ON(1); ++ } ++ ++ status = map->m_flags & EXT4_MAP_UNWRITTEN ? ++ EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; ++ ext4_es_insert_extent(inode, map->m_lblk, map->m_len, ++ map->m_pblk, status); ++ return retval; ++} ++ + /* + * The ext4_map_blocks() function tries to look up the requested blocks, + * and returns if the blocks are already mapped. +@@ -1705,12 +1734,10 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock, + + /* Lookup extent status tree firstly */ + if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) { +- if (ext4_es_is_hole(&es)) { +- retval = 0; +- down_read(&EXT4_I(inode)->i_data_sem); ++ if (ext4_es_is_hole(&es)) + goto add_delayed; +- } + ++found: + /* + * Delayed extent could be allocated by fallocate. 
+ * So we need to check it. +@@ -1747,49 +1774,42 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock, + down_read(&EXT4_I(inode)->i_data_sem); + if (ext4_has_inline_data(inode)) + retval = 0; +- else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) +- retval = ext4_ext_map_blocks(NULL, inode, map, 0); + else +- retval = ext4_ind_map_blocks(NULL, inode, map, 0); ++ retval = ext4_map_query_blocks(NULL, inode, map); ++ up_read(&EXT4_I(inode)->i_data_sem); ++ if (retval) ++ return retval; + + add_delayed: +- if (retval == 0) { +- int ret; +- +- /* +- * XXX: __block_prepare_write() unmaps passed block, +- * is it OK? +- */ +- +- ret = ext4_insert_delayed_block(inode, map->m_lblk); +- if (ret != 0) { +- retval = ret; +- goto out_unlock; ++ down_write(&EXT4_I(inode)->i_data_sem); ++ /* ++ * Page fault path (ext4_page_mkwrite does not take i_rwsem) ++ * and fallocate path (no folio lock) can race. Make sure we ++ * lookup the extent status tree here again while i_data_sem ++ * is held in write mode, before inserting a new da entry in ++ * the extent status tree. ++ */ ++ if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) { ++ if (!ext4_es_is_hole(&es)) { ++ up_write(&EXT4_I(inode)->i_data_sem); ++ goto found; + } +- +- map_bh(bh, inode->i_sb, invalid_block); +- set_buffer_new(bh); +- set_buffer_delay(bh); +- } else if (retval > 0) { +- unsigned int status; +- +- if (unlikely(retval != map->m_len)) { +- ext4_warning(inode->i_sb, +- "ES len assertion failed for inode " +- "%lu: retval %d != map->m_len %d", +- inode->i_ino, retval, map->m_len); +- WARN_ON(1); ++ } else if (!ext4_has_inline_data(inode)) { ++ retval = ext4_map_query_blocks(NULL, inode, map); ++ if (retval) { ++ up_write(&EXT4_I(inode)->i_data_sem); ++ return retval; + } +- +- status = map->m_flags & EXT4_MAP_UNWRITTEN ? +- EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; +- ext4_es_insert_extent(inode, map->m_lblk, map->m_len, +- map->m_pblk, status); + } + +-out_unlock: +- up_read((&EXT4_I(inode)->i_data_sem)); ++ retval = ext4_insert_delayed_block(inode, map->m_lblk); ++ up_write(&EXT4_I(inode)->i_data_sem); ++ if (retval) ++ return retval; + ++ map_bh(bh, inode->i_sb, invalid_block); ++ set_buffer_new(bh); ++ set_buffer_delay(bh); + return retval; + } + +diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c +index 22080606b8769..804958c6de34c 100644 +--- a/fs/f2fs/segment.c ++++ b/fs/f2fs/segment.c +@@ -3350,7 +3350,9 @@ static int __get_segment_type_6(struct f2fs_io_info *fio) + if (page_private_gcing(fio->page)) { + if (fio->sbi->am.atgc_enabled && + (fio->io_type == FS_DATA_IO) && +- (fio->sbi->gc_mode != GC_URGENT_HIGH)) ++ (fio->sbi->gc_mode != GC_URGENT_HIGH) && ++ __is_valid_data_blkaddr(fio->old_blkaddr) && ++ !is_inode_flag_set(inode, FI_OPU_WRITE)) + return CURSEG_ALL_DATA_ATGC; + else + return CURSEG_COLD_DATA; +diff --git a/fs/file.c b/fs/file.c +index a815f6eddc511..b64a84137c00f 100644 +--- a/fs/file.c ++++ b/fs/file.c +@@ -1124,6 +1124,7 @@ __releases(&files->file_lock) + * tables and this condition does not arise without those. 
+ */ + fdt = files_fdtable(files); ++ fd = array_index_nospec(fd, fdt->max_fds); + tofree = fdt->fd[fd]; + if (!tofree && fd_is_open(fd, fdt)) + goto Ebusy; +diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c +index 5b5cdc747cef3..071a71eb1a2d4 100644 +--- a/fs/proc/proc_sysctl.c ++++ b/fs/proc/proc_sysctl.c +@@ -480,12 +480,10 @@ static struct inode *proc_sys_make_inode(struct super_block *sb, + make_empty_dir_inode(inode); + } + ++ inode->i_uid = GLOBAL_ROOT_UID; ++ inode->i_gid = GLOBAL_ROOT_GID; + if (root->set_ownership) +- root->set_ownership(head, table, &inode->i_uid, &inode->i_gid); +- else { +- inode->i_uid = GLOBAL_ROOT_UID; +- inode->i_gid = GLOBAL_ROOT_GID; +- } ++ root->set_ownership(head, &inode->i_uid, &inode->i_gid); + + return inode; + } +diff --git a/include/linux/leds.h b/include/linux/leds.h +index aa16dc2a8230f..d3056bc6f0a1a 100644 +--- a/include/linux/leds.h ++++ b/include/linux/leds.h +@@ -474,6 +474,9 @@ struct led_trigger { + int (*activate)(struct led_classdev *led_cdev); + void (*deactivate)(struct led_classdev *led_cdev); + ++ /* Brightness set by led_trigger_event */ ++ enum led_brightness brightness; ++ + /* LED-private triggers have this set */ + struct led_hw_trigger_type *trigger_type; + +@@ -527,22 +530,11 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev) + return led_cdev->trigger_data; + } + +-/** +- * led_trigger_rename_static - rename a trigger +- * @name: the new trigger name +- * @trig: the LED trigger to rename +- * +- * Change a LED trigger name by copying the string passed in +- * name into current trigger name, which MUST be large +- * enough for the new string. +- * +- * Note that name must NOT point to the same string used +- * during LED registration, as that could lead to races. +- * +- * This is meant to be used on triggers with statically +- * allocated name. +- */ +-void led_trigger_rename_static(const char *name, struct led_trigger *trig); ++static inline enum led_brightness ++led_trigger_get_brightness(const struct led_trigger *trigger) ++{ ++ return trigger ? 
trigger->brightness : LED_OFF; ++} + + #define module_led_trigger(__led_trigger) \ + module_driver(__led_trigger, led_trigger_register, \ +@@ -580,6 +572,12 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev) + return NULL; + } + ++static inline enum led_brightness ++led_trigger_get_brightness(const struct led_trigger *trigger) ++{ ++ return LED_OFF; ++} ++ + #endif /* CONFIG_LEDS_TRIGGERS */ + + /* Trigger specific enum */ +diff --git a/include/linux/pci.h b/include/linux/pci.h +index f141300116219..7b18a4b3efb0e 100644 +--- a/include/linux/pci.h ++++ b/include/linux/pci.h +@@ -1182,6 +1182,8 @@ struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn); + struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus, + unsigned int devfn); + struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from); ++struct pci_dev *pci_get_base_class(unsigned int class, struct pci_dev *from); ++ + int pci_dev_present(const struct pci_device_id *ids); + + int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn, +@@ -1958,6 +1960,9 @@ static inline struct pci_dev *pci_get_class(unsigned int class, + struct pci_dev *from) + { return NULL; } + ++static inline struct pci_dev *pci_get_base_class(unsigned int class, ++ struct pci_dev *from) ++{ return NULL; } + + static inline int pci_dev_present(const struct pci_device_id *ids) + { return 0; } +diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h +index eab7081392d50..6a4a3cec4638b 100644 +--- a/include/linux/screen_info.h ++++ b/include/linux/screen_info.h +@@ -4,6 +4,142 @@ + + #include + ++#include ++ ++/** ++ * SCREEN_INFO_MAX_RESOURCES - maximum number of resources per screen_info ++ */ ++#define SCREEN_INFO_MAX_RESOURCES 3 ++ ++struct pci_dev; ++struct resource; ++ ++static inline bool __screen_info_has_lfb(unsigned int type) ++{ ++ return (type == VIDEO_TYPE_VLFB) || (type == VIDEO_TYPE_EFI); ++} ++ ++static inline u64 __screen_info_lfb_base(const struct screen_info *si) ++{ ++ u64 lfb_base = si->lfb_base; ++ ++ if (si->capabilities & VIDEO_CAPABILITY_64BIT_BASE) ++ lfb_base |= (u64)si->ext_lfb_base << 32; ++ ++ return lfb_base; ++} ++ ++static inline void __screen_info_set_lfb_base(struct screen_info *si, u64 lfb_base) ++{ ++ si->lfb_base = lfb_base & GENMASK_ULL(31, 0); ++ si->ext_lfb_base = (lfb_base & GENMASK_ULL(63, 32)) >> 32; ++ ++ if (si->ext_lfb_base) ++ si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE; ++ else ++ si->capabilities &= ~VIDEO_CAPABILITY_64BIT_BASE; ++} ++ ++static inline u64 __screen_info_lfb_size(const struct screen_info *si, unsigned int type) ++{ ++ u64 lfb_size = si->lfb_size; ++ ++ if (type == VIDEO_TYPE_VLFB) ++ lfb_size <<= 16; ++ return lfb_size; ++} ++ ++static inline bool __screen_info_vbe_mode_nonvga(const struct screen_info *si) ++{ ++ /* ++ * VESA modes typically run on VGA hardware. Set bit 5 signals that this ++ * is not the case. Drivers can then not make use of VGA resources. See ++ * Sec 4.4 of the VBE 2.0 spec. 
++ */ ++ return si->vesa_attributes & BIT(5); ++} ++ ++static inline unsigned int __screen_info_video_type(unsigned int type) ++{ ++ switch (type) { ++ case VIDEO_TYPE_MDA: ++ case VIDEO_TYPE_CGA: ++ case VIDEO_TYPE_EGAM: ++ case VIDEO_TYPE_EGAC: ++ case VIDEO_TYPE_VGAC: ++ case VIDEO_TYPE_VLFB: ++ case VIDEO_TYPE_PICA_S3: ++ case VIDEO_TYPE_MIPS_G364: ++ case VIDEO_TYPE_SGI: ++ case VIDEO_TYPE_TGAC: ++ case VIDEO_TYPE_SUN: ++ case VIDEO_TYPE_SUNPCI: ++ case VIDEO_TYPE_PMAC: ++ case VIDEO_TYPE_EFI: ++ return type; ++ default: ++ return 0; ++ } ++} ++ ++/** ++ * screen_info_video_type() - Decodes the video type from struct screen_info ++ * @si: an instance of struct screen_info ++ * ++ * Returns: ++ * A VIDEO_TYPE_ constant representing si's type of video display, or 0 otherwise. ++ */ ++static inline unsigned int screen_info_video_type(const struct screen_info *si) ++{ ++ unsigned int type; ++ ++ // check if display output is on ++ if (!si->orig_video_isVGA) ++ return 0; ++ ++ // check for a known VIDEO_TYPE_ constant ++ type = __screen_info_video_type(si->orig_video_isVGA); ++ if (type) ++ return si->orig_video_isVGA; ++ ++ // check if text mode has been initialized ++ if (!si->orig_video_lines || !si->orig_video_cols) ++ return 0; ++ ++ // 80x25 text, mono ++ if (si->orig_video_mode == 0x07) { ++ if ((si->orig_video_ega_bx & 0xff) != 0x10) ++ return VIDEO_TYPE_EGAM; ++ else ++ return VIDEO_TYPE_MDA; ++ } ++ ++ // EGA/VGA, 16 colors ++ if ((si->orig_video_ega_bx & 0xff) != 0x10) { ++ if (si->orig_video_isVGA) ++ return VIDEO_TYPE_VGAC; ++ else ++ return VIDEO_TYPE_EGAC; ++ } ++ ++ // the rest... ++ return VIDEO_TYPE_CGA; ++} ++ ++ssize_t screen_info_resources(const struct screen_info *si, struct resource *r, size_t num); ++ ++#if defined(CONFIG_PCI) ++void screen_info_apply_fixups(void); ++struct pci_dev *screen_info_pci_dev(const struct screen_info *si); ++#else ++static inline void screen_info_apply_fixups(void) ++{ } ++static inline struct pci_dev *screen_info_pci_dev(const struct screen_info *si) ++{ ++ return NULL; ++} ++#endif ++ + extern struct screen_info screen_info; + + #endif /* _SCREEN_INFO_H */ +diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h +index 61b40ea81f4d3..698a71422a14b 100644 +--- a/include/linux/sysctl.h ++++ b/include/linux/sysctl.h +@@ -205,7 +205,6 @@ struct ctl_table_root { + struct ctl_table_set default_set; + struct ctl_table_set *(*lookup)(struct ctl_table_root *root); + void (*set_ownership)(struct ctl_table_header *head, +- struct ctl_table *table, + kuid_t *uid, kgid_t *gid); + int (*permissions)(struct ctl_table_header *head, struct ctl_table *table); + }; +diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h +index b2db2c2f1c577..3c4d5ef6d4463 100644 +--- a/include/trace/events/btrfs.h ++++ b/include/trace/events/btrfs.h +@@ -2430,6 +2430,14 @@ DEFINE_EVENT(btrfs__space_info_update, update_bytes_pinned, + TP_ARGS(fs_info, sinfo, old, diff) + ); + ++DEFINE_EVENT(btrfs__space_info_update, update_bytes_zone_unusable, ++ ++ TP_PROTO(const struct btrfs_fs_info *fs_info, ++ const struct btrfs_space_info *sinfo, u64 old, s64 diff), ++ ++ TP_ARGS(fs_info, sinfo, old, diff) ++); ++ + DECLARE_EVENT_CLASS(btrfs_raid56_bio, + + TP_PROTO(const struct btrfs_raid_bio *rbio, +diff --git a/include/trace/events/mptcp.h b/include/trace/events/mptcp.h +index 563e48617374d..54e8fb5a229cd 100644 +--- a/include/trace/events/mptcp.h ++++ b/include/trace/events/mptcp.h +@@ -34,7 +34,7 @@ TRACE_EVENT(mptcp_subflow_get_send, + struct sock *ssk; + + 
__entry->active = mptcp_subflow_active(subflow); +- __entry->backup = subflow->backup; ++ __entry->backup = subflow->backup || subflow->request_bkup; + + if (subflow->tcp_sock && sk_fullsock(subflow->tcp_sock)) + __entry->free = sk_stream_memory_free(subflow->tcp_sock); +diff --git a/init/Kconfig b/init/Kconfig +index e403a29256357..e173364abd6c0 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -1898,6 +1898,7 @@ config RUST + depends on !MODVERSIONS + depends on !GCC_PLUGINS + depends on !RANDSTRUCT ++ depends on !SHADOW_CALL_STACK + depends on !DEBUG_INFO_BTF || PAHOLE_HAS_LANG_EXCLUDE + help + Enables Rust support in the kernel. +diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c +index 8c62e443f78b3..b2f39a86f4734 100644 +--- a/ipc/ipc_sysctl.c ++++ b/ipc/ipc_sysctl.c +@@ -14,6 +14,7 @@ + #include + #include + #include ++#include + #include "util.h" + + static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write, +@@ -190,25 +191,56 @@ static int set_is_seen(struct ctl_table_set *set) + return ¤t->nsproxy->ipc_ns->ipc_set == set; + } + ++static void ipc_set_ownership(struct ctl_table_header *head, ++ kuid_t *uid, kgid_t *gid) ++{ ++ struct ipc_namespace *ns = ++ container_of(head->set, struct ipc_namespace, ipc_set); ++ ++ kuid_t ns_root_uid = make_kuid(ns->user_ns, 0); ++ kgid_t ns_root_gid = make_kgid(ns->user_ns, 0); ++ ++ *uid = uid_valid(ns_root_uid) ? ns_root_uid : GLOBAL_ROOT_UID; ++ *gid = gid_valid(ns_root_gid) ? ns_root_gid : GLOBAL_ROOT_GID; ++} ++ + static int ipc_permissions(struct ctl_table_header *head, struct ctl_table *table) + { + int mode = table->mode; + + #ifdef CONFIG_CHECKPOINT_RESTORE +- struct ipc_namespace *ns = current->nsproxy->ipc_ns; ++ struct ipc_namespace *ns = ++ container_of(head->set, struct ipc_namespace, ipc_set); + + if (((table->data == &ns->ids[IPC_SEM_IDS].next_id) || + (table->data == &ns->ids[IPC_MSG_IDS].next_id) || + (table->data == &ns->ids[IPC_SHM_IDS].next_id)) && + checkpoint_restore_ns_capable(ns->user_ns)) + mode = 0666; ++ else + #endif +- return mode; ++ { ++ kuid_t ns_root_uid; ++ kgid_t ns_root_gid; ++ ++ ipc_set_ownership(head, &ns_root_uid, &ns_root_gid); ++ ++ if (uid_eq(current_euid(), ns_root_uid)) ++ mode >>= 6; ++ ++ else if (in_egroup_p(ns_root_gid)) ++ mode >>= 3; ++ } ++ ++ mode &= 7; ++ ++ return (mode << 6) | (mode << 3) | mode; + } + + static struct ctl_table_root set_root = { + .lookup = set_lookup, + .permissions = ipc_permissions, ++ .set_ownership = ipc_set_ownership, + }; + + bool setup_ipc_sysctls(struct ipc_namespace *ns) +diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c +index ebb5ed81c151a..6bb1c5397c69b 100644 +--- a/ipc/mq_sysctl.c ++++ b/ipc/mq_sysctl.c +@@ -12,6 +12,7 @@ + #include + #include + #include ++#include + + static int msg_max_limit_min = MIN_MSGMAX; + static int msg_max_limit_max = HARD_MSGMAX; +@@ -76,8 +77,42 @@ static int set_is_seen(struct ctl_table_set *set) + return ¤t->nsproxy->ipc_ns->mq_set == set; + } + ++static void mq_set_ownership(struct ctl_table_header *head, ++ kuid_t *uid, kgid_t *gid) ++{ ++ struct ipc_namespace *ns = ++ container_of(head->set, struct ipc_namespace, mq_set); ++ ++ kuid_t ns_root_uid = make_kuid(ns->user_ns, 0); ++ kgid_t ns_root_gid = make_kgid(ns->user_ns, 0); ++ ++ *uid = uid_valid(ns_root_uid) ? ns_root_uid : GLOBAL_ROOT_UID; ++ *gid = gid_valid(ns_root_gid) ? 
ns_root_gid : GLOBAL_ROOT_GID; ++} ++ ++static int mq_permissions(struct ctl_table_header *head, struct ctl_table *table) ++{ ++ int mode = table->mode; ++ kuid_t ns_root_uid; ++ kgid_t ns_root_gid; ++ ++ mq_set_ownership(head, &ns_root_uid, &ns_root_gid); ++ ++ if (uid_eq(current_euid(), ns_root_uid)) ++ mode >>= 6; ++ ++ else if (in_egroup_p(ns_root_gid)) ++ mode >>= 3; ++ ++ mode &= 7; ++ ++ return (mode << 6) | (mode << 3) | mode; ++} ++ + static struct ctl_table_root set_root = { + .lookup = set_lookup, ++ .permissions = mq_permissions, ++ .set_ownership = mq_set_ownership, + }; + + bool setup_mq_sysctls(struct ipc_namespace *ns) +diff --git a/mm/Kconfig b/mm/Kconfig +index 264a2df5ecf5b..ece4f2847e2b4 100644 +--- a/mm/Kconfig ++++ b/mm/Kconfig +@@ -704,6 +704,17 @@ config HUGETLB_PAGE_SIZE_VARIABLE + config CONTIG_ALLOC + def_bool (MEMORY_ISOLATION && COMPACTION) || CMA + ++config PCP_BATCH_SCALE_MAX ++ int "Maximum scale factor of PCP (Per-CPU pageset) batch allocate/free" ++ default 5 ++ range 0 6 ++ help ++ In page allocator, PCP (Per-CPU pageset) is refilled and drained in ++ batches. The batch number is scaled automatically to improve page ++ allocation/free throughput. But too large scale factor may hurt ++ latency. This option sets the upper limit of scale factor to limit ++ the maximum latency. ++ + config PHYS_ADDR_T_64BIT + def_bool 64BIT + +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index e99d3223f0fc2..39bdbfb5313fb 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -2185,14 +2185,21 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) + */ + static void drain_pages_zone(unsigned int cpu, struct zone *zone) + { +- struct per_cpu_pages *pcp; ++ struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); ++ int count; + +- pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); +- if (pcp->count) { ++ do { + spin_lock(&pcp->lock); +- free_pcppages_bulk(zone, pcp->count, pcp, 0); ++ count = pcp->count; ++ if (count) { ++ int to_drain = min(count, ++ pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX); ++ ++ free_pcppages_bulk(zone, to_drain, pcp, 0); ++ count -= to_drain; ++ } + spin_unlock(&pcp->lock); +- } ++ } while (count); + } + + /* +@@ -2343,7 +2350,7 @@ static int nr_pcp_free(struct per_cpu_pages *pcp, int high, bool free_high) + * freeing of pages without any allocation. + */ + batch <<= pcp->free_factor; +- if (batch < max_nr_free) ++ if (batch < max_nr_free && pcp->free_factor < CONFIG_PCP_BATCH_SCALE_MAX) + pcp->free_factor++; + batch = clamp(batch, min_nr_free, max_nr_free); + +diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c +index b3f5714dab342..6dab0c99c82c7 100644 +--- a/net/bluetooth/hci_sync.c ++++ b/net/bluetooth/hci_sync.c +@@ -2862,6 +2862,27 @@ static int hci_passive_scan_sync(struct hci_dev *hdev) + */ + filter_policy = hci_update_accept_list_sync(hdev); + ++ /* If suspended and filter_policy set to 0x00 (no acceptlist) then ++ * passive scanning cannot be started since that would require the host ++ * to be woken up to process the reports. ++ */ ++ if (hdev->suspended && !filter_policy) { ++ /* Check if accept list is empty then there is no need to scan ++ * while suspended. 
++ */ ++ if (list_empty(&hdev->le_accept_list)) ++ return 0; ++ ++ /* If there are devices is the accept_list that means some ++ * devices could not be programmed which in non-suspended case ++ * means filter_policy needs to be set to 0x00 so the host needs ++ * to filter, but since this is treating suspended case we ++ * can ignore device needing host to filter to allow devices in ++ * the acceptlist to be able to wakeup the system. ++ */ ++ filter_policy = 0x01; ++ } ++ + /* When the controller is using random resolvable addresses and + * with that having LE privacy enabled, then controllers with + * Extended Scanner Filter Policies support can now enable support +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c +index 7ea66de1442cc..8573dad6d8171 100644 +--- a/net/core/rtnetlink.c ++++ b/net/core/rtnetlink.c +@@ -3263,7 +3263,7 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, + if (ifm->ifi_index > 0) + dev = __dev_get_by_index(tgt_net, ifm->ifi_index); + else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) +- dev = rtnl_dev_get(net, tb); ++ dev = rtnl_dev_get(tgt_net, tb); + else if (tb[IFLA_GROUP]) + err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP])); + else +diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c +index 56f6ecc43451e..12ca666d6e2c1 100644 +--- a/net/ipv4/netfilter/iptable_nat.c ++++ b/net/ipv4/netfilter/iptable_nat.c +@@ -145,25 +145,27 @@ static struct pernet_operations iptable_nat_net_ops = { + + static int __init iptable_nat_init(void) + { +- int ret = xt_register_template(&nf_nat_ipv4_table, +- iptable_nat_table_init); ++ int ret; + ++ /* net->gen->ptr[iptable_nat_net_id] must be allocated ++ * before calling iptable_nat_table_init(). ++ */ ++ ret = register_pernet_subsys(&iptable_nat_net_ops); + if (ret < 0) + return ret; + +- ret = register_pernet_subsys(&iptable_nat_net_ops); +- if (ret < 0) { +- xt_unregister_template(&nf_nat_ipv4_table); +- return ret; +- } ++ ret = xt_register_template(&nf_nat_ipv4_table, ++ iptable_nat_table_init); ++ if (ret < 0) ++ unregister_pernet_subsys(&iptable_nat_net_ops); + + return ret; + } + + static void __exit iptable_nat_exit(void) + { +- unregister_pernet_subsys(&iptable_nat_net_ops); + xt_unregister_template(&nf_nat_ipv4_table); ++ unregister_pernet_subsys(&iptable_nat_net_ops); + } + + module_init(iptable_nat_init); +diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c +index 3b4dafefb4b03..e143562077958 100644 +--- a/net/ipv4/syncookies.c ++++ b/net/ipv4/syncookies.c +@@ -424,7 +424,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) + } + + /* Try to redo what tcp_v4_send_synack did. */ +- req->rsk_window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW); ++ req->rsk_window_clamp = READ_ONCE(tp->window_clamp) ? 
: ++ dst_metric(&rt->dst, RTAX_WINDOW); + /* limit the window selection if the user enforce a smaller rx buffer */ + full_space = tcp_full_space(sk); + if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c +index 91c3d8264059d..1e3202f2b7a87 100644 +--- a/net/ipv4/tcp.c ++++ b/net/ipv4/tcp.c +@@ -1723,7 +1723,7 @@ int tcp_set_rcvlowat(struct sock *sk, int val) + space = tcp_space_from_win(sk, val); + if (space > sk->sk_rcvbuf) { + WRITE_ONCE(sk->sk_rcvbuf, space); +- tcp_sk(sk)->window_clamp = val; ++ WRITE_ONCE(tcp_sk(sk)->window_clamp, val); + } + return 0; + } +@@ -3386,7 +3386,7 @@ int tcp_set_window_clamp(struct sock *sk, int val) + if (!val) { + if (sk->sk_state != TCP_CLOSE) + return -EINVAL; +- tp->window_clamp = 0; ++ WRITE_ONCE(tp->window_clamp, 0); + } else { + u32 new_rcv_ssthresh, old_window_clamp = tp->window_clamp; + u32 new_window_clamp = val < SOCK_MIN_RCVBUF / 2 ? +@@ -3395,7 +3395,7 @@ int tcp_set_window_clamp(struct sock *sk, int val) + if (new_window_clamp == old_window_clamp) + return 0; + +- tp->window_clamp = new_window_clamp; ++ WRITE_ONCE(tp->window_clamp, new_window_clamp); + if (new_window_clamp < old_window_clamp) { + /* need to apply the reserved mem provisioning only + * when shrinking the window clamp +@@ -4020,7 +4020,7 @@ int do_tcp_getsockopt(struct sock *sk, int level, + TCP_RTO_MAX / HZ); + break; + case TCP_WINDOW_CLAMP: +- val = tp->window_clamp; ++ val = READ_ONCE(tp->window_clamp); + break; + case TCP_INFO: { + struct tcp_info info; +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index c2e4dac42453b..d0364cff65c9f 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -570,19 +570,20 @@ static void tcp_init_buffer_space(struct sock *sk) + maxwin = tcp_full_space(sk); + + if (tp->window_clamp >= maxwin) { +- tp->window_clamp = maxwin; ++ WRITE_ONCE(tp->window_clamp, maxwin); + + if (tcp_app_win && maxwin > 4 * tp->advmss) +- tp->window_clamp = max(maxwin - +- (maxwin >> tcp_app_win), +- 4 * tp->advmss); ++ WRITE_ONCE(tp->window_clamp, ++ max(maxwin - (maxwin >> tcp_app_win), ++ 4 * tp->advmss)); + } + + /* Force reservation of one segment. */ + if (tcp_app_win && + tp->window_clamp > 2 * tp->advmss && + tp->window_clamp + tp->advmss > maxwin) +- tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss); ++ WRITE_ONCE(tp->window_clamp, ++ max(2 * tp->advmss, maxwin - tp->advmss)); + + tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp); + tp->snd_cwnd_stamp = tcp_jiffies32; +@@ -747,8 +748,7 @@ void tcp_rcv_space_adjust(struct sock *sk) + * + */ + +- if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) && +- !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { ++ if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf)) { + u64 rcvwin, grow; + int rcvbuf; + +@@ -764,11 +764,22 @@ void tcp_rcv_space_adjust(struct sock *sk) + + rcvbuf = min_t(u64, tcp_space_from_win(sk, rcvwin), + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])); +- if (rcvbuf > sk->sk_rcvbuf) { +- WRITE_ONCE(sk->sk_rcvbuf, rcvbuf); ++ if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { ++ if (rcvbuf > sk->sk_rcvbuf) { ++ WRITE_ONCE(sk->sk_rcvbuf, rcvbuf); + +- /* Make the window clamp follow along. */ +- tp->window_clamp = tcp_win_from_space(sk, rcvbuf); ++ /* Make the window clamp follow along. */ ++ WRITE_ONCE(tp->window_clamp, ++ tcp_win_from_space(sk, rcvbuf)); ++ } ++ } else { ++ /* Make the window clamp follow along while being bounded ++ * by SO_RCVBUF. 
++ */ ++ int clamp = tcp_win_from_space(sk, min(rcvbuf, sk->sk_rcvbuf)); ++ ++ if (clamp > tp->window_clamp) ++ WRITE_ONCE(tp->window_clamp, clamp); + } + } + tp->rcvq_space.space = copied; +@@ -6347,7 +6358,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, + + if (!tp->rx_opt.wscale_ok) { + tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0; +- tp->window_clamp = min(tp->window_clamp, 65535U); ++ WRITE_ONCE(tp->window_clamp, ++ min(tp->window_clamp, 65535U)); + } + + if (tp->rx_opt.saw_tstamp) { +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c +index 5631041ae12cb..15c49d559db53 100644 +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -203,16 +203,17 @@ static inline void tcp_event_ack_sent(struct sock *sk, u32 rcv_nxt) + * This MUST be enforced by all callers. + */ + void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss, +- __u32 *rcv_wnd, __u32 *window_clamp, ++ __u32 *rcv_wnd, __u32 *__window_clamp, + int wscale_ok, __u8 *rcv_wscale, + __u32 init_rcv_wnd) + { + unsigned int space = (__space < 0 ? 0 : __space); ++ u32 window_clamp = READ_ONCE(*__window_clamp); + + /* If no clamp set the clamp to the max possible scaled window */ +- if (*window_clamp == 0) +- (*window_clamp) = (U16_MAX << TCP_MAX_WSCALE); +- space = min(*window_clamp, space); ++ if (window_clamp == 0) ++ window_clamp = (U16_MAX << TCP_MAX_WSCALE); ++ space = min(window_clamp, space); + + /* Quantize space offering to a multiple of mss if possible. */ + if (space > mss) +@@ -239,12 +240,13 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss, + /* Set window scaling on max possible window */ + space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])); + space = max_t(u32, space, READ_ONCE(sysctl_rmem_max)); +- space = min_t(u32, space, *window_clamp); ++ space = min_t(u32, space, window_clamp); + *rcv_wscale = clamp_t(int, ilog2(space) - 15, + 0, TCP_MAX_WSCALE); + } + /* Set the clamp no higher than max representable value */ +- (*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp); ++ WRITE_ONCE(*__window_clamp, ++ min_t(__u32, U16_MAX << (*rcv_wscale), window_clamp)); + } + EXPORT_SYMBOL(tcp_select_initial_window); + +@@ -3787,7 +3789,7 @@ static void tcp_connect_init(struct sock *sk) + tcp_ca_dst_init(sk, dst); + + if (!tp->window_clamp) +- tp->window_clamp = dst_metric(dst, RTAX_WINDOW); ++ WRITE_ONCE(tp->window_clamp, dst_metric(dst, RTAX_WINDOW)); + tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst)); + + tcp_initialize_rcv_mss(sk); +@@ -3795,7 +3797,7 @@ static void tcp_connect_init(struct sock *sk) + /* limit the window selection if the user enforce a smaller rx buffer */ + if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && + (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0)) +- tp->window_clamp = tcp_full_space(sk); ++ WRITE_ONCE(tp->window_clamp, tcp_full_space(sk)); + + rcv_wnd = tcp_rwnd_init_bpf(sk); + if (rcv_wnd == 0) +diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c +index 68debc78189c2..2062ab94721e3 100644 +--- a/net/ipv6/ndisc.c ++++ b/net/ipv6/ndisc.c +@@ -227,6 +227,7 @@ struct ndisc_options *ndisc_parse_options(const struct net_device *dev, + return NULL; + memset(ndopts, 0, sizeof(*ndopts)); + while (opt_len) { ++ bool unknown = false; + int l; + if (opt_len < sizeof(struct nd_opt_hdr)) + return NULL; +@@ -262,22 +263,23 @@ struct ndisc_options *ndisc_parse_options(const struct net_device *dev, + break; + #endif + default: +- if 
(ndisc_is_useropt(dev, nd_opt)) { +- ndopts->nd_useropts_end = nd_opt; +- if (!ndopts->nd_useropts) +- ndopts->nd_useropts = nd_opt; +- } else { +- /* +- * Unknown options must be silently ignored, +- * to accommodate future extension to the +- * protocol. +- */ +- ND_PRINTK(2, notice, +- "%s: ignored unsupported option; type=%d, len=%d\n", +- __func__, +- nd_opt->nd_opt_type, +- nd_opt->nd_opt_len); +- } ++ unknown = true; ++ } ++ if (ndisc_is_useropt(dev, nd_opt)) { ++ ndopts->nd_useropts_end = nd_opt; ++ if (!ndopts->nd_useropts) ++ ndopts->nd_useropts = nd_opt; ++ } else if (unknown) { ++ /* ++ * Unknown options must be silently ignored, ++ * to accommodate future extension to the ++ * protocol. ++ */ ++ ND_PRINTK(2, notice, ++ "%s: ignored unsupported option; type=%d, len=%d\n", ++ __func__, ++ nd_opt->nd_opt_type, ++ nd_opt->nd_opt_len); + } + next_opt: + opt_len -= l; +diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c +index bf3cb3a13600c..52d597b16b658 100644 +--- a/net/ipv6/netfilter/ip6table_nat.c ++++ b/net/ipv6/netfilter/ip6table_nat.c +@@ -147,23 +147,27 @@ static struct pernet_operations ip6table_nat_net_ops = { + + static int __init ip6table_nat_init(void) + { +- int ret = xt_register_template(&nf_nat_ipv6_table, +- ip6table_nat_table_init); ++ int ret; + ++ /* net->gen->ptr[ip6table_nat_net_id] must be allocated ++ * before calling ip6t_nat_register_lookups(). ++ */ ++ ret = register_pernet_subsys(&ip6table_nat_net_ops); + if (ret < 0) + return ret; + +- ret = register_pernet_subsys(&ip6table_nat_net_ops); ++ ret = xt_register_template(&nf_nat_ipv6_table, ++ ip6table_nat_table_init); + if (ret) +- xt_unregister_template(&nf_nat_ipv6_table); ++ unregister_pernet_subsys(&ip6table_nat_net_ops); + + return ret; + } + + static void __exit ip6table_nat_exit(void) + { +- unregister_pernet_subsys(&ip6table_nat_net_ops); + xt_unregister_template(&nf_nat_ipv6_table); ++ unregister_pernet_subsys(&ip6table_nat_net_ops); + } + + module_init(ip6table_nat_init); +diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c +index 8698b49dfc8de..593ead8a45d79 100644 +--- a/net/ipv6/syncookies.c ++++ b/net/ipv6/syncookies.c +@@ -243,7 +243,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) + goto out_free; + } + +- req->rsk_window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW); ++ req->rsk_window_clamp = READ_ONCE(tp->window_clamp) ? :dst_metric(dst, RTAX_WINDOW); + /* limit the window selection if the user enforce a smaller rx buffer */ + full_space = tcp_full_space(sk); + if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && +diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c +index 498a0c35b7bb2..815b1df0b2d19 100644 +--- a/net/iucv/af_iucv.c ++++ b/net/iucv/af_iucv.c +@@ -335,8 +335,8 @@ static void iucv_sever_path(struct sock *sk, int with_user_data) + struct iucv_sock *iucv = iucv_sk(sk); + struct iucv_path *path = iucv->path; + +- if (iucv->path) { +- iucv->path = NULL; ++ /* Whoever resets the path pointer, must sever and free it. 
*/ ++ if (xchg(&iucv->path, NULL)) { + if (with_user_data) { + low_nmcpy(user_data, iucv->src_name); + high_nmcpy(user_data, iucv->dst_name); +diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c +index c30405e768337..7884217f33eb2 100644 +--- a/net/mptcp/mib.c ++++ b/net/mptcp/mib.c +@@ -19,7 +19,9 @@ static const struct snmp_mib mptcp_snmp_list[] = { + SNMP_MIB_ITEM("MPTCPRetrans", MPTCP_MIB_RETRANSSEGS), + SNMP_MIB_ITEM("MPJoinNoTokenFound", MPTCP_MIB_JOINNOTOKEN), + SNMP_MIB_ITEM("MPJoinSynRx", MPTCP_MIB_JOINSYNRX), ++ SNMP_MIB_ITEM("MPJoinSynBackupRx", MPTCP_MIB_JOINSYNBACKUPRX), + SNMP_MIB_ITEM("MPJoinSynAckRx", MPTCP_MIB_JOINSYNACKRX), ++ SNMP_MIB_ITEM("MPJoinSynAckBackupRx", MPTCP_MIB_JOINSYNACKBACKUPRX), + SNMP_MIB_ITEM("MPJoinSynAckHMacFailure", MPTCP_MIB_JOINSYNACKMAC), + SNMP_MIB_ITEM("MPJoinAckRx", MPTCP_MIB_JOINACKRX), + SNMP_MIB_ITEM("MPJoinAckHMacFailure", MPTCP_MIB_JOINACKMAC), +diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h +index dd7fd1f246b5f..443604462ace8 100644 +--- a/net/mptcp/mib.h ++++ b/net/mptcp/mib.h +@@ -12,7 +12,9 @@ enum linux_mptcp_mib_field { + MPTCP_MIB_RETRANSSEGS, /* Segments retransmitted at the MPTCP-level */ + MPTCP_MIB_JOINNOTOKEN, /* Received MP_JOIN but the token was not found */ + MPTCP_MIB_JOINSYNRX, /* Received a SYN + MP_JOIN */ ++ MPTCP_MIB_JOINSYNBACKUPRX, /* Received a SYN + MP_JOIN + backup flag */ + MPTCP_MIB_JOINSYNACKRX, /* Received a SYN/ACK + MP_JOIN */ ++ MPTCP_MIB_JOINSYNACKBACKUPRX, /* Received a SYN/ACK + MP_JOIN + backup flag */ + MPTCP_MIB_JOINSYNACKMAC, /* HMAC was wrong on SYN/ACK + MP_JOIN */ + MPTCP_MIB_JOINACKRX, /* Received an ACK + MP_JOIN */ + MPTCP_MIB_JOINACKMAC, /* HMAC was wrong on ACK + MP_JOIN */ +diff --git a/net/mptcp/options.c b/net/mptcp/options.c +index 63fc0758c22d4..85aafa94cc8ab 100644 +--- a/net/mptcp/options.c ++++ b/net/mptcp/options.c +@@ -909,7 +909,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size, + return true; + } else if (subflow_req->mp_join) { + opts->suboptions = OPTION_MPTCP_MPJ_SYNACK; +- opts->backup = subflow_req->backup; ++ opts->backup = subflow_req->request_bkup; + opts->join_id = subflow_req->local_id; + opts->thmac = subflow_req->thmac; + opts->nonce = subflow_req->local_nonce; +diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c +index f58bf77d76b81..db621933b2035 100644 +--- a/net/mptcp/pm_netlink.c ++++ b/net/mptcp/pm_netlink.c +@@ -476,7 +476,6 @@ static void __mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_con + slow = lock_sock_fast(ssk); + if (prio) { + subflow->send_mp_prio = 1; +- subflow->backup = backup; + subflow->request_bkup = backup; + } + +@@ -1432,6 +1431,7 @@ static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk, + ret = remove_anno_list_by_saddr(msk, addr); + if (ret || force) { + spin_lock_bh(&msk->pm.lock); ++ msk->pm.add_addr_signaled -= ret; + mptcp_pm_remove_addr(msk, &list); + spin_unlock_bh(&msk->pm.lock); + } +@@ -1565,16 +1565,25 @@ void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list) + { + struct mptcp_rm_list alist = { .nr = 0 }; + struct mptcp_pm_addr_entry *entry; ++ int anno_nr = 0; + + list_for_each_entry(entry, rm_list, list) { +- if ((remove_anno_list_by_saddr(msk, &entry->addr) || +- lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) && +- alist.nr < MPTCP_RM_IDS_MAX) +- alist.ids[alist.nr++] = entry->addr.id; ++ if (alist.nr >= MPTCP_RM_IDS_MAX) ++ break; ++ ++ /* only delete if either announced or matching a subflow */ ++ if (remove_anno_list_by_saddr(msk, 
&entry->addr)) ++ anno_nr++; ++ else if (!lookup_subflow_by_saddr(&msk->conn_list, ++ &entry->addr)) ++ continue; ++ ++ alist.ids[alist.nr++] = entry->addr.id; + } + + if (alist.nr) { + spin_lock_bh(&msk->pm.lock); ++ msk->pm.add_addr_signaled -= anno_nr; + mptcp_pm_remove_addr(msk, &alist); + spin_unlock_bh(&msk->pm.lock); + } +@@ -1587,17 +1596,18 @@ void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk, + struct mptcp_pm_addr_entry *entry; + + list_for_each_entry(entry, rm_list, list) { +- if (lookup_subflow_by_saddr(&msk->conn_list, &entry->addr) && +- slist.nr < MPTCP_RM_IDS_MAX) ++ if (slist.nr < MPTCP_RM_IDS_MAX && ++ lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) + slist.ids[slist.nr++] = entry->addr.id; + +- if (remove_anno_list_by_saddr(msk, &entry->addr) && +- alist.nr < MPTCP_RM_IDS_MAX) ++ if (alist.nr < MPTCP_RM_IDS_MAX && ++ remove_anno_list_by_saddr(msk, &entry->addr)) + alist.ids[alist.nr++] = entry->addr.id; + } + + if (alist.nr) { + spin_lock_bh(&msk->pm.lock); ++ msk->pm.add_addr_signaled -= alist.nr; + mptcp_pm_remove_addr(msk, &alist); + spin_unlock_bh(&msk->pm.lock); + } +diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c +index fbf2b26760731..c1d652a3f7a9b 100644 +--- a/net/mptcp/protocol.c ++++ b/net/mptcp/protocol.c +@@ -352,8 +352,10 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk, + skb_orphan(skb); + + /* try to fetch required memory from subflow */ +- if (!mptcp_rmem_schedule(sk, ssk, skb->truesize)) ++ if (!mptcp_rmem_schedule(sk, ssk, skb->truesize)) { ++ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED); + goto drop; ++ } + + has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp; + +@@ -842,16 +844,13 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk) + sk_rbuf = ssk_rbuf; + + /* over limit? 
can't append more skbs to msk, Also, no need to wake-up*/ +- if (__mptcp_rmem(sk) > sk_rbuf) { +- MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED); ++ if (__mptcp_rmem(sk) > sk_rbuf) + return; +- } + + /* Wake-up the reader only for in-sequence data */ + mptcp_data_lock(sk); +- if (move_skbs_to_msk(msk, ssk)) ++ if (move_skbs_to_msk(msk, ssk) && mptcp_epollin_ready(sk)) + sk->sk_data_ready(sk); +- + mptcp_data_unlock(sk); + } + +@@ -1418,13 +1417,15 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk) + } + + mptcp_for_each_subflow(msk, subflow) { ++ bool backup = subflow->backup || subflow->request_bkup; ++ + trace_mptcp_subflow_get_send(subflow); + ssk = mptcp_subflow_tcp_sock(subflow); + if (!mptcp_subflow_active(subflow)) + continue; + + tout = max(tout, mptcp_timeout_from_subflow(subflow)); +- nr_active += !subflow->backup; ++ nr_active += !backup; + pace = subflow->avg_pacing_rate; + if (unlikely(!pace)) { + /* init pacing rate from socket */ +@@ -1435,9 +1436,9 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk) + } + + linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace); +- if (linger_time < send_info[subflow->backup].linger_time) { +- send_info[subflow->backup].ssk = ssk; +- send_info[subflow->backup].linger_time = linger_time; ++ if (linger_time < send_info[backup].linger_time) { ++ send_info[backup].ssk = ssk; ++ send_info[backup].linger_time = linger_time; + } + } + __mptcp_set_timeout(sk, tout); +@@ -1918,6 +1919,7 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk, + if (!(flags & MSG_PEEK)) { + MPTCP_SKB_CB(skb)->offset += count; + MPTCP_SKB_CB(skb)->map_seq += count; ++ msk->bytes_consumed += count; + } + break; + } +@@ -1928,6 +1930,7 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk, + WRITE_ONCE(msk->rmem_released, msk->rmem_released + skb->truesize); + __skb_unlink(skb, &msk->receive_queue); + __kfree_skb(skb); ++ msk->bytes_consumed += count; + } + + if (copied >= len) +@@ -2023,7 +2026,7 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied) + ssk = mptcp_subflow_tcp_sock(subflow); + slow = lock_sock_fast(ssk); + WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf); +- tcp_sk(ssk)->window_clamp = window_clamp; ++ WRITE_ONCE(tcp_sk(ssk)->window_clamp, window_clamp); + tcp_cleanup_rbuf(ssk, 1); + unlock_sock_fast(ssk, slow); + } +@@ -2752,6 +2755,7 @@ static void __mptcp_init_sock(struct sock *sk) + msk->rmem_fwd_alloc = 0; + WRITE_ONCE(msk->rmem_released, 0); + msk->timer_ival = TCP_RTO_MIN; ++ msk->scaling_ratio = TCP_DEFAULT_SCALING_RATIO; + + WRITE_ONCE(msk->first, NULL); + inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss; +@@ -2984,16 +2988,9 @@ void __mptcp_unaccepted_force_close(struct sock *sk) + __mptcp_destroy_sock(sk); + } + +-static __poll_t mptcp_check_readable(struct mptcp_sock *msk) ++static __poll_t mptcp_check_readable(struct sock *sk) + { +- /* Concurrent splices from sk_receive_queue into receive_queue will +- * always show at least one non-empty queue when checked in this order. +- */ +- if (skb_queue_empty_lockless(&((struct sock *)msk)->sk_receive_queue) && +- skb_queue_empty_lockless(&msk->receive_queue)) +- return 0; +- +- return EPOLLIN | EPOLLRDNORM; ++ return mptcp_epollin_ready(sk) ? 
EPOLLIN | EPOLLRDNORM : 0; + } + + static void mptcp_check_listen_stop(struct sock *sk) +@@ -3031,7 +3028,7 @@ bool __mptcp_close(struct sock *sk, long timeout) + goto cleanup; + } + +- if (mptcp_check_readable(msk) || timeout < 0) { ++ if (mptcp_data_avail(msk) || timeout < 0) { + /* If the msk has read data, or the caller explicitly ask it, + * do the MPTCP equivalent of TCP reset, aka MPTCP fastclose + */ +@@ -3157,6 +3154,7 @@ static int mptcp_disconnect(struct sock *sk, int flags) + msk->snd_data_fin_enable = false; + msk->rcv_fastclose = false; + msk->use_64bit_ack = false; ++ msk->bytes_consumed = 0; + WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk))); + mptcp_pm_data_reset(msk); + mptcp_ca_reset(sk); +@@ -3983,7 +3981,7 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock, + mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; + + if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) { +- mask |= mptcp_check_readable(msk); ++ mask |= mptcp_check_readable(sk); + if (shutdown & SEND_SHUTDOWN) + mask |= EPOLLOUT | EPOLLWRNORM; + else +@@ -4021,6 +4019,7 @@ static const struct proto_ops mptcp_stream_ops = { + .sendmsg = inet_sendmsg, + .recvmsg = inet_recvmsg, + .mmap = sock_no_mmap, ++ .set_rcvlowat = mptcp_set_rcvlowat, + }; + + static struct inet_protosw mptcp_protosw = { +@@ -4122,6 +4121,7 @@ static const struct proto_ops mptcp_v6_stream_ops = { + #ifdef CONFIG_COMPAT + .compat_ioctl = inet6_compat_ioctl, + #endif ++ .set_rcvlowat = mptcp_set_rcvlowat, + }; + + static struct proto mptcp_v6_prot; +diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h +index 93ba48f4ae386..c28ac5dfd0b58 100644 +--- a/net/mptcp/protocol.h ++++ b/net/mptcp/protocol.h +@@ -268,6 +268,7 @@ struct mptcp_sock { + atomic64_t rcv_wnd_sent; + u64 rcv_data_fin_seq; + u64 bytes_retrans; ++ u64 bytes_consumed; + int rmem_fwd_alloc; + int snd_burst; + int old_wspace; +@@ -418,6 +419,7 @@ struct mptcp_subflow_request_sock { + u16 mp_capable : 1, + mp_join : 1, + backup : 1, ++ request_bkup : 1, + csum_reqd : 1, + allow_join_id0 : 1; + u8 local_id; +@@ -674,6 +676,24 @@ struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk); + int mptcp_sched_get_send(struct mptcp_sock *msk); + int mptcp_sched_get_retrans(struct mptcp_sock *msk); + ++static inline u64 mptcp_data_avail(const struct mptcp_sock *msk) ++{ ++ return READ_ONCE(msk->bytes_received) - READ_ONCE(msk->bytes_consumed); ++} ++ ++static inline bool mptcp_epollin_ready(const struct sock *sk) ++{ ++ /* mptcp doesn't have to deal with small skbs in the receive queue, ++ * at it can always coalesce them ++ */ ++ return (mptcp_data_avail(mptcp_sk(sk)) >= sk->sk_rcvlowat) || ++ (mem_cgroup_sockets_enabled && sk->sk_memcg && ++ mem_cgroup_under_socket_pressure(sk->sk_memcg)) || ++ READ_ONCE(tcp_memory_pressure); ++} ++ ++int mptcp_set_rcvlowat(struct sock *sk, int val); ++ + static inline bool __tcp_can_send(const struct sock *ssk) + { + /* only send if our side has not closed yet */ +@@ -748,6 +768,7 @@ static inline bool mptcp_is_fully_established(struct sock *sk) + return inet_sk_state_load(sk) == TCP_ESTABLISHED && + READ_ONCE(mptcp_sk(sk)->fully_established); + } ++ + void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk); + void mptcp_data_ready(struct sock *sk, struct sock *ssk); + bool mptcp_finish_join(struct sock *sk); +diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c +index cc04b5e29dd35..bdfeecea814f3 100644 +--- a/net/mptcp/sockopt.c ++++ b/net/mptcp/sockopt.c +@@ -1521,9 +1521,55 @@ void 
mptcp_sockopt_sync_locked(struct mptcp_sock *msk, struct sock *ssk) + + msk_owned_by_me(msk); + ++ ssk->sk_rcvlowat = 0; ++ + if (READ_ONCE(subflow->setsockopt_seq) != msk->setsockopt_seq) { + sync_socket_options(msk, ssk); + + subflow->setsockopt_seq = msk->setsockopt_seq; + } + } ++ ++/* unfortunately this is different enough from the tcp version so ++ * that we can't factor it out ++ */ ++int mptcp_set_rcvlowat(struct sock *sk, int val) ++{ ++ struct mptcp_subflow_context *subflow; ++ int space, cap; ++ ++ /* bpf can land here with a wrong sk type */ ++ if (sk->sk_protocol == IPPROTO_TCP) ++ return -EINVAL; ++ ++ if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) ++ cap = sk->sk_rcvbuf >> 1; ++ else ++ cap = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1; ++ val = min(val, cap); ++ WRITE_ONCE(sk->sk_rcvlowat, val ? : 1); ++ ++ /* Check if we need to signal EPOLLIN right now */ ++ if (mptcp_epollin_ready(sk)) ++ sk->sk_data_ready(sk); ++ ++ if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) ++ return 0; ++ ++ space = __tcp_space_from_win(mptcp_sk(sk)->scaling_ratio, val); ++ if (space <= sk->sk_rcvbuf) ++ return 0; ++ ++ /* propagate the rcvbuf changes to all the subflows */ ++ WRITE_ONCE(sk->sk_rcvbuf, space); ++ mptcp_for_each_subflow(mptcp_sk(sk), subflow) { ++ struct sock *ssk = mptcp_subflow_tcp_sock(subflow); ++ bool slow; ++ ++ slow = lock_sock_fast(ssk); ++ WRITE_ONCE(ssk->sk_rcvbuf, space); ++ WRITE_ONCE(tcp_sk(ssk)->window_clamp, val); ++ unlock_sock_fast(ssk, slow); ++ } ++ return 0; ++} +diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c +index 23ee96c6abcbf..bc1efc1787720 100644 +--- a/net/mptcp/subflow.c ++++ b/net/mptcp/subflow.c +@@ -166,6 +166,9 @@ static int subflow_check_req(struct request_sock *req, + return 0; + } else if (opt_mp_join) { + SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX); ++ ++ if (mp_opt.backup) ++ SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNBACKUPRX); + } + + if (opt_mp_capable && listener->request_mptcp) { +@@ -558,6 +561,9 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb) + subflow->mp_join = 1; + MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX); + ++ if (subflow->backup) ++ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKBACKUPRX); ++ + if (subflow_use_different_dport(msk, sk)) { + pr_debug("synack inet_dport=%d %d", + ntohs(inet_sk(sk)->inet_dport), +@@ -1192,14 +1198,22 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb, + { + struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); + bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN; +- u32 incr; ++ struct tcp_sock *tp = tcp_sk(ssk); ++ u32 offset, incr, avail_len; ++ ++ offset = tp->copied_seq - TCP_SKB_CB(skb)->seq; ++ if (WARN_ON_ONCE(offset > skb->len)) ++ goto out; + +- incr = limit >= skb->len ? skb->len + fin : limit; ++ avail_len = skb->len - offset; ++ incr = limit >= avail_len ? 
avail_len + fin : limit; + +- pr_debug("discarding=%d len=%d seq=%d", incr, skb->len, +- subflow->map_subflow_seq); ++ pr_debug("discarding=%d len=%d offset=%d seq=%d", incr, skb->len, ++ offset, subflow->map_subflow_seq); + MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA); + tcp_sk(ssk)->copied_seq += incr; ++ ++out: + if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq)) + sk_eat_skb(ssk, skb); + if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) +@@ -1432,10 +1446,18 @@ static void subflow_data_ready(struct sock *sk) + WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable && + !subflow->mp_join && !(state & TCPF_CLOSE)); + +- if (mptcp_subflow_data_available(sk)) ++ if (mptcp_subflow_data_available(sk)) { + mptcp_data_ready(parent, sk); +- else if (unlikely(sk->sk_err)) ++ ++ /* subflow-level lowat test are not relevant. ++ * respect the msk-level threshold eventually mandating an immediate ack ++ */ ++ if (mptcp_data_avail(msk) < parent->sk_rcvlowat && ++ (tcp_sk(sk)->rcv_nxt - tcp_sk(sk)->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss) ++ inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; ++ } else if (unlikely(sk->sk_err)) { + subflow_error_report(sk); ++ } + } + + static void subflow_write_space(struct sock *ssk) +@@ -1968,6 +1990,7 @@ static void subflow_ulp_clone(const struct request_sock *req, + new_ctx->fully_established = 1; + new_ctx->remote_key_valid = 1; + new_ctx->backup = subflow_req->backup; ++ new_ctx->request_bkup = subflow_req->request_bkup; + WRITE_ONCE(new_ctx->remote_id, subflow_req->remote_id); + new_ctx->token = subflow_req->token; + new_ctx->thmac = subflow_req->thmac; +diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c +index 3ac19516ed803..50d24e240e8fb 100644 +--- a/net/sched/act_ct.c ++++ b/net/sched/act_ct.c +@@ -44,6 +44,8 @@ static DEFINE_MUTEX(zones_mutex); + struct zones_ht_key { + struct net *net; + u16 zone; ++ /* Note : pad[] must be the last field. 
*/ ++ u8 pad[]; + }; + + struct tcf_ct_flow_table { +@@ -60,7 +62,7 @@ struct tcf_ct_flow_table { + static const struct rhashtable_params zones_params = { + .head_offset = offsetof(struct tcf_ct_flow_table, node), + .key_offset = offsetof(struct tcf_ct_flow_table, key), +- .key_len = sizeof_field(struct tcf_ct_flow_table, key), ++ .key_len = offsetof(struct zones_ht_key, pad), + .automatic_shrinking = true, + }; + +diff --git a/net/sysctl_net.c b/net/sysctl_net.c +index 051ed5f6fc937..a0a7a79991f9f 100644 +--- a/net/sysctl_net.c ++++ b/net/sysctl_net.c +@@ -54,7 +54,6 @@ static int net_ctl_permissions(struct ctl_table_header *head, + } + + static void net_ctl_set_ownership(struct ctl_table_header *head, +- struct ctl_table *table, + kuid_t *uid, kgid_t *gid) + { + struct net *net = container_of(head->set, struct net, sysctls); +diff --git a/net/wireless/sme.c b/net/wireless/sme.c +index 9bba233b5a6ec..72d78dbc55ffd 100644 +--- a/net/wireless/sme.c ++++ b/net/wireless/sme.c +@@ -1057,6 +1057,7 @@ void cfg80211_connect_done(struct net_device *dev, + cfg80211_hold_bss( + bss_from_pub(params->links[link].bss)); + ev->cr.links[link].bss = params->links[link].bss; ++ ev->cr.links[link].status = params->links[link].status; + + if (params->links[link].addr) { + ev->cr.links[link].addr = next; +diff --git a/sound/core/seq/seq_ump_convert.c b/sound/core/seq/seq_ump_convert.c +index e90b27a135e6f..d9dacfbe4a9ae 100644 +--- a/sound/core/seq/seq_ump_convert.c ++++ b/sound/core/seq/seq_ump_convert.c +@@ -1192,44 +1192,53 @@ static int cvt_sysex_to_ump(struct snd_seq_client *dest, + { + struct snd_seq_ump_event ev_cvt; + unsigned char status; +- u8 buf[6], *xbuf; ++ u8 buf[8], *xbuf; + int offset = 0; + int len, err; ++ bool finished = false; + + if (!snd_seq_ev_is_variable(event)) + return 0; + + setup_ump_event(&ev_cvt, event); +- for (;;) { ++ while (!finished) { + len = snd_seq_expand_var_event_at(event, sizeof(buf), buf, offset); + if (len <= 0) + break; +- if (WARN_ON(len > 6)) ++ if (WARN_ON(len > sizeof(buf))) + break; +- offset += len; ++ + xbuf = buf; ++ status = UMP_SYSEX_STATUS_CONTINUE; ++ /* truncate the sysex start-marker */ + if (*xbuf == UMP_MIDI1_MSG_SYSEX_START) { + status = UMP_SYSEX_STATUS_START; +- xbuf++; + len--; +- if (len > 0 && xbuf[len - 1] == UMP_MIDI1_MSG_SYSEX_END) { ++ offset++; ++ xbuf++; ++ } ++ ++ /* if the last of this packet or the 1st byte of the next packet ++ * is the end-marker, finish the transfer with this packet ++ */ ++ if (len > 0 && len < 8 && ++ xbuf[len - 1] == UMP_MIDI1_MSG_SYSEX_END) { ++ if (status == UMP_SYSEX_STATUS_START) + status = UMP_SYSEX_STATUS_SINGLE; +- len--; +- } +- } else { +- if (xbuf[len - 1] == UMP_MIDI1_MSG_SYSEX_END) { ++ else + status = UMP_SYSEX_STATUS_END; +- len--; +- } else { +- status = UMP_SYSEX_STATUS_CONTINUE; +- } ++ len--; ++ finished = true; + } ++ ++ len = min(len, 6); + fill_sysex7_ump(dest_port, ev_cvt.ump, status, xbuf, len); + err = __snd_seq_deliver_single_event(dest, dest_port, + (struct snd_seq_event *)&ev_cvt, + atomic, hop); + if (err < 0) + return err; ++ offset += len; + } + return 0; + } +diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c +index c9f153f85ae6b..5f0f8d9c08d1e 100644 +--- a/sound/firewire/amdtp-stream.c ++++ b/sound/firewire/amdtp-stream.c +@@ -77,6 +77,8 @@ + // overrun. Actual device can skip more, then this module stops the packet streaming. 
+ #define IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES 5 + ++static void pcm_period_work(struct work_struct *work); ++ + /** + * amdtp_stream_init - initialize an AMDTP stream structure + * @s: the AMDTP stream to initialize +@@ -105,6 +107,7 @@ int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit, + s->flags = flags; + s->context = ERR_PTR(-1); + mutex_init(&s->mutex); ++ INIT_WORK(&s->period_work, pcm_period_work); + s->packet_index = 0; + + init_waitqueue_head(&s->ready_wait); +@@ -347,6 +350,7 @@ EXPORT_SYMBOL(amdtp_stream_get_max_payload); + */ + void amdtp_stream_pcm_prepare(struct amdtp_stream *s) + { ++ cancel_work_sync(&s->period_work); + s->pcm_buffer_pointer = 0; + s->pcm_period_pointer = 0; + } +@@ -611,19 +615,21 @@ static void update_pcm_pointers(struct amdtp_stream *s, + // The program in user process should periodically check the status of intermediate + // buffer associated to PCM substream to process PCM frames in the buffer, instead + // of receiving notification of period elapsed by poll wait. +- if (!pcm->runtime->no_period_wakeup) { +- if (in_softirq()) { +- // In software IRQ context for 1394 OHCI. +- snd_pcm_period_elapsed(pcm); +- } else { +- // In process context of ALSA PCM application under acquired lock of +- // PCM substream. +- snd_pcm_period_elapsed_under_stream_lock(pcm); +- } +- } ++ if (!pcm->runtime->no_period_wakeup) ++ queue_work(system_highpri_wq, &s->period_work); + } + } + ++static void pcm_period_work(struct work_struct *work) ++{ ++ struct amdtp_stream *s = container_of(work, struct amdtp_stream, ++ period_work); ++ struct snd_pcm_substream *pcm = READ_ONCE(s->pcm); ++ ++ if (pcm) ++ snd_pcm_period_elapsed(pcm); ++} ++ + static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params, + bool sched_irq) + { +@@ -1852,11 +1858,14 @@ unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d, + { + struct amdtp_stream *irq_target = d->irq_target; + +- // Process isochronous packets queued till recent isochronous cycle to handle PCM frames. + if (irq_target && amdtp_stream_running(irq_target)) { +- // In software IRQ context, the call causes dead-lock to disable the tasklet +- // synchronously. +- if (!in_softirq()) ++ // use wq to prevent AB/BA deadlock competition for ++ // substream lock: ++ // fw_iso_context_flush_completions() acquires ++ // lock by ohci_flush_iso_completions(), ++ // amdtp-stream process_rx_packets() attempts to ++ // acquire same lock by snd_pcm_elapsed() ++ if (current_work() != &s->period_work) + fw_iso_context_flush_completions(irq_target->context); + } + +@@ -1912,6 +1921,7 @@ static void amdtp_stream_stop(struct amdtp_stream *s) + return; + } + ++ cancel_work_sync(&s->period_work); + fw_iso_context_stop(s->context); + fw_iso_context_destroy(s->context); + s->context = ERR_PTR(-1); +diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h +index a1ed2e80f91a7..775db3fc4959f 100644 +--- a/sound/firewire/amdtp-stream.h ++++ b/sound/firewire/amdtp-stream.h +@@ -191,6 +191,7 @@ struct amdtp_stream { + + /* For a PCM substream processing. 
*/ + struct snd_pcm_substream *pcm; ++ struct work_struct period_work; + snd_pcm_uframes_t pcm_buffer_pointer; + unsigned int pcm_period_pointer; + unsigned int pcm_frame_multiplier; +diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h +index 8556031bcd68e..f31cb31d46362 100644 +--- a/sound/pci/hda/hda_controller.h ++++ b/sound/pci/hda/hda_controller.h +@@ -28,7 +28,7 @@ + #else + #define AZX_DCAPS_I915_COMPONENT 0 /* NOP */ + #endif +-/* 14 unused */ ++#define AZX_DCAPS_AMD_ALLOC_FIX (1 << 14) /* AMD allocation workaround */ + #define AZX_DCAPS_CTX_WORKAROUND (1 << 15) /* X-Fi workaround */ + #define AZX_DCAPS_POSFIX_LPIB (1 << 16) /* Use LPIB as default */ + #define AZX_DCAPS_AMD_WORKAROUND (1 << 17) /* AMD-specific workaround */ +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c +index a6a9d353fe635..d5c9f113e477a 100644 +--- a/sound/pci/hda/hda_intel.c ++++ b/sound/pci/hda/hda_intel.c +@@ -40,6 +40,7 @@ + + #ifdef CONFIG_X86 + /* for snoop control */ ++#include + #include + #include + #endif +@@ -301,7 +302,7 @@ enum { + + /* quirks for ATI HDMI with snoop off */ + #define AZX_DCAPS_PRESET_ATI_HDMI_NS \ +- (AZX_DCAPS_PRESET_ATI_HDMI | AZX_DCAPS_SNOOP_OFF) ++ (AZX_DCAPS_PRESET_ATI_HDMI | AZX_DCAPS_AMD_ALLOC_FIX) + + /* quirks for AMD SB */ + #define AZX_DCAPS_PRESET_AMD_SB \ +@@ -1715,6 +1716,13 @@ static void azx_check_snoop_available(struct azx *chip) + if (chip->driver_caps & AZX_DCAPS_SNOOP_OFF) + snoop = false; + ++#ifdef CONFIG_X86 ++ /* check the presence of DMA ops (i.e. IOMMU), disable snoop conditionally */ ++ if ((chip->driver_caps & AZX_DCAPS_AMD_ALLOC_FIX) && ++ !get_dma_ops(chip->card->dev)) ++ snoop = false; ++#endif ++ + chip->snoop = snoop; + if (!snoop) { + dev_info(chip->card->dev, "Force to non-snoop mode\n"); +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c +index e8209178d87bb..af921364195e4 100644 +--- a/sound/pci/hda/patch_conexant.c ++++ b/sound/pci/hda/patch_conexant.c +@@ -21,12 +21,6 @@ + #include "hda_jack.h" + #include "hda_generic.h" + +-enum { +- CX_HEADSET_NOPRESENT = 0, +- CX_HEADSET_PARTPRESENT, +- CX_HEADSET_ALLPRESENT, +-}; +- + struct conexant_spec { + struct hda_gen_spec gen; + +@@ -48,7 +42,6 @@ struct conexant_spec { + unsigned int gpio_led; + unsigned int gpio_mute_led_mask; + unsigned int gpio_mic_led_mask; +- unsigned int headset_present_flag; + bool is_cx8070_sn6140; + }; + +@@ -250,48 +243,19 @@ static void cx_process_headset_plugin(struct hda_codec *codec) + } + } + +-static void cx_update_headset_mic_vref(struct hda_codec *codec, unsigned int res) ++static void cx_update_headset_mic_vref(struct hda_codec *codec, struct hda_jack_callback *event) + { +- unsigned int phone_present, mic_persent, phone_tag, mic_tag; +- struct conexant_spec *spec = codec->spec; ++ unsigned int mic_present; + + /* In cx8070 and sn6140, the node 16 can only be config to headphone or disabled, + * the node 19 can only be config to microphone or disabled. + * Check hp&mic tag to process headset pulgin&plugout. 
+ */ +- phone_tag = snd_hda_codec_read(codec, 0x16, 0, AC_VERB_GET_UNSOLICITED_RESPONSE, 0x0); +- mic_tag = snd_hda_codec_read(codec, 0x19, 0, AC_VERB_GET_UNSOLICITED_RESPONSE, 0x0); +- if ((phone_tag & (res >> AC_UNSOL_RES_TAG_SHIFT)) || +- (mic_tag & (res >> AC_UNSOL_RES_TAG_SHIFT))) { +- phone_present = snd_hda_codec_read(codec, 0x16, 0, AC_VERB_GET_PIN_SENSE, 0x0); +- if (!(phone_present & AC_PINSENSE_PRESENCE)) {/* headphone plugout */ +- spec->headset_present_flag = CX_HEADSET_NOPRESENT; +- snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20); +- return; +- } +- if (spec->headset_present_flag == CX_HEADSET_NOPRESENT) { +- spec->headset_present_flag = CX_HEADSET_PARTPRESENT; +- } else if (spec->headset_present_flag == CX_HEADSET_PARTPRESENT) { +- mic_persent = snd_hda_codec_read(codec, 0x19, 0, +- AC_VERB_GET_PIN_SENSE, 0x0); +- /* headset is present */ +- if ((phone_present & AC_PINSENSE_PRESENCE) && +- (mic_persent & AC_PINSENSE_PRESENCE)) { +- cx_process_headset_plugin(codec); +- spec->headset_present_flag = CX_HEADSET_ALLPRESENT; +- } +- } +- } +-} +- +-static void cx_jack_unsol_event(struct hda_codec *codec, unsigned int res) +-{ +- struct conexant_spec *spec = codec->spec; +- +- if (spec->is_cx8070_sn6140) +- cx_update_headset_mic_vref(codec, res); +- +- snd_hda_jack_unsol_event(codec, res); ++ mic_present = snd_hda_codec_read(codec, 0x19, 0, AC_VERB_GET_PIN_SENSE, 0x0); ++ if (!(mic_present & AC_PINSENSE_PRESENCE)) /* mic plugout */ ++ snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20); ++ else ++ cx_process_headset_plugin(codec); + } + + #ifdef CONFIG_PM +@@ -307,7 +271,7 @@ static const struct hda_codec_ops cx_auto_patch_ops = { + .build_pcms = snd_hda_gen_build_pcms, + .init = cx_auto_init, + .free = cx_auto_free, +- .unsol_event = cx_jack_unsol_event, ++ .unsol_event = snd_hda_jack_unsol_event, + #ifdef CONFIG_PM + .suspend = cx_auto_suspend, + .check_power_status = snd_hda_gen_check_power_status, +@@ -1167,7 +1131,7 @@ static int patch_conexant_auto(struct hda_codec *codec) + case 0x14f11f86: + case 0x14f11f87: + spec->is_cx8070_sn6140 = true; +- spec->headset_present_flag = CX_HEADSET_NOPRESENT; ++ snd_hda_jack_detect_enable_callback(codec, 0x19, cx_update_headset_mic_vref); + break; + } + +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index f3aca1c38b77d..0b33a00771450 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -9639,6 +9639,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS), + SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC), + SND_PCI_QUIRK(0x1025, 0x0840, "Acer Aspire E1", ALC269VB_FIXUP_ASPIRE_E1_COEF), ++ SND_PCI_QUIRK(0x1025, 0x100c, "Acer Aspire E5-574G", ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x1025, 0x101c, "Acer Veriton N2510G", ALC269_FIXUP_LIFEBOOK), + SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC), +diff --git a/sound/usb/stream.c b/sound/usb/stream.c +index d5409f3879455..e14c725acebf2 100644 +--- a/sound/usb/stream.c ++++ b/sound/usb/stream.c +@@ -244,8 +244,8 @@ static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits, + SNDRV_CHMAP_FR, /* right front */ + SNDRV_CHMAP_FC, /* center front */ + SNDRV_CHMAP_LFE, /* LFE */ +- SNDRV_CHMAP_SL, /* 
left surround */ +- SNDRV_CHMAP_SR, /* right surround */ ++ SNDRV_CHMAP_RL, /* left surround */ ++ SNDRV_CHMAP_RR, /* right surround */ + SNDRV_CHMAP_FLC, /* left of center */ + SNDRV_CHMAP_FRC, /* right of center */ + SNDRV_CHMAP_RC, /* surround */ +diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c +index aee937d14fbbf..09e6b4e1401c9 100644 +--- a/tools/perf/util/callchain.c ++++ b/tools/perf/util/callchain.c +@@ -1126,7 +1126,7 @@ int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *samp + int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node, + bool hide_unresolved) + { +- struct machine *machine = maps__machine(node->ms.maps); ++ struct machine *machine = node->ms.maps ? maps__machine(node->ms.maps) : NULL; + + maps__put(al->maps); + al->maps = maps__get(node->ms.maps); +diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c +index d2043ec3bf6d6..4209b95690394 100644 +--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c ++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c +@@ -1115,11 +1115,11 @@ int main_loop_s(int listensock) + return 1; + } + +- if (--cfg_repeat > 0) { +- if (cfg_input) +- close(fd); ++ if (cfg_input) ++ close(fd); ++ ++ if (--cfg_repeat > 0) + goto again; +- } + + return 0; + } +diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh +index 231a95a8de9ee..a2dae2a3a93e0 100755 +--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh ++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh +@@ -1778,6 +1778,8 @@ chk_prio_nr() + { + local mp_prio_nr_tx=$1 + local mp_prio_nr_rx=$2 ++ local mpj_syn=$3 ++ local mpj_syn_ack=$4 + local count + + print_check "ptx" +@@ -1799,6 +1801,26 @@ chk_prio_nr() + else + print_ok + fi ++ ++ print_check "syn backup" ++ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinSynBackupRx") ++ if [ -z "$count" ]; then ++ print_skip ++ elif [ "$count" != "$mpj_syn" ]; then ++ fail_test "got $count JOIN[s] syn with Backup expected $mpj_syn" ++ else ++ print_ok ++ fi ++ ++ print_check "synack backup" ++ count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtMPJoinSynAckBackupRx") ++ if [ -z "$count" ]; then ++ print_skip ++ elif [ "$count" != "$mpj_syn_ack" ]; then ++ fail_test "got $count JOIN[s] synack with Backup expected $mpj_syn_ack" ++ else ++ print_ok ++ fi + } + + chk_subflow_nr() +@@ -2751,11 +2773,24 @@ backup_tests() + sflags=nobackup speed=slow \ + run_tests $ns1 $ns2 10.0.1.1 + chk_join_nr 1 1 1 +- chk_prio_nr 0 1 ++ chk_prio_nr 0 1 1 0 + fi + + # single address, backup + if reset "single address, backup" && ++ continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then ++ pm_nl_set_limits $ns1 0 1 ++ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal,backup ++ pm_nl_set_limits $ns2 1 1 ++ sflags=nobackup speed=slow \ ++ run_tests $ns1 $ns2 10.0.1.1 ++ chk_join_nr 1 1 1 ++ chk_add_nr 1 1 ++ chk_prio_nr 1 0 0 1 ++ fi ++ ++ # single address, switch to backup ++ if reset "single address, switch to backup" && + continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then + pm_nl_set_limits $ns1 0 1 + pm_nl_add_endpoint $ns1 10.0.2.1 flags signal +@@ -2764,20 +2799,20 @@ backup_tests() + run_tests $ns1 $ns2 10.0.1.1 + chk_join_nr 1 1 1 + chk_add_nr 1 1 +- chk_prio_nr 1 1 ++ chk_prio_nr 1 1 0 0 + fi + + # single address with port, backup + if reset "single address with port, backup" && + continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; 
then + pm_nl_set_limits $ns1 0 1 +- pm_nl_add_endpoint $ns1 10.0.2.1 flags signal port 10100 ++ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal,backup port 10100 + pm_nl_set_limits $ns2 1 1 +- sflags=backup speed=slow \ ++ sflags=nobackup speed=slow \ + run_tests $ns1 $ns2 10.0.1.1 + chk_join_nr 1 1 1 + chk_add_nr 1 1 +- chk_prio_nr 1 1 ++ chk_prio_nr 1 0 0 1 + fi + + if reset "mpc backup" && +@@ -2786,17 +2821,26 @@ backup_tests() + speed=slow \ + run_tests $ns1 $ns2 10.0.1.1 + chk_join_nr 0 0 0 +- chk_prio_nr 0 1 ++ chk_prio_nr 0 1 0 0 + fi + + if reset "mpc backup both sides" && + continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then +- pm_nl_add_endpoint $ns1 10.0.1.1 flags subflow,backup ++ pm_nl_set_limits $ns1 0 2 ++ pm_nl_set_limits $ns2 1 2 ++ pm_nl_add_endpoint $ns1 10.0.1.1 flags signal,backup + pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,backup ++ ++ # 10.0.2.2 (non-backup) -> 10.0.1.1 (backup) ++ pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow ++ # 10.0.1.2 (backup) -> 10.0.2.1 (non-backup) ++ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal ++ ip -net "$ns2" route add 10.0.2.1 via 10.0.1.1 dev ns2eth1 # force this path ++ + speed=slow \ + run_tests $ns1 $ns2 10.0.1.1 +- chk_join_nr 0 0 0 +- chk_prio_nr 1 1 ++ chk_join_nr 2 2 2 ++ chk_prio_nr 1 1 1 1 + fi + + if reset "mpc switch to backup" && +@@ -2805,7 +2849,7 @@ backup_tests() + sflags=backup speed=slow \ + run_tests $ns1 $ns2 10.0.1.1 + chk_join_nr 0 0 0 +- chk_prio_nr 0 1 ++ chk_prio_nr 0 1 0 0 + fi + + if reset "mpc switch to backup both sides" && +@@ -2815,7 +2859,7 @@ backup_tests() + sflags=backup speed=slow \ + run_tests $ns1 $ns2 10.0.1.1 + chk_join_nr 0 0 0 +- chk_prio_nr 1 1 ++ chk_prio_nr 1 1 0 0 + fi + } + +@@ -3215,7 +3259,7 @@ fullmesh_tests() + addr_nr_ns2=1 sflags=backup,fullmesh speed=slow \ + run_tests $ns1 $ns2 10.0.1.1 + chk_join_nr 2 2 2 +- chk_prio_nr 0 1 ++ chk_prio_nr 0 1 1 0 + chk_rm_nr 0 1 + fi + +@@ -3228,7 +3272,7 @@ fullmesh_tests() + sflags=nobackup,nofullmesh speed=slow \ + run_tests $ns1 $ns2 10.0.1.1 + chk_join_nr 2 2 2 +- chk_prio_nr 0 1 ++ chk_prio_nr 0 1 1 0 + chk_rm_nr 0 1 + fi + } +@@ -3407,7 +3451,7 @@ userspace_tests() + sflags=backup speed=slow \ + run_tests $ns1 $ns2 10.0.1.1 + chk_join_nr 1 1 0 +- chk_prio_nr 0 0 ++ chk_prio_nr 0 0 0 0 + fi + + # userspace pm type prevents rm_addr diff --git a/patch/kernel/archive/odroidxu4-6.6/patch-6.6.45-46.patch b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.45-46.patch new file mode 100644 index 000000000000..359d52eccf87 --- /dev/null +++ b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.45-46.patch @@ -0,0 +1,6300 @@ +diff --git a/Documentation/admin-guide/cifs/usage.rst b/Documentation/admin-guide/cifs/usage.rst +index 20aba92dfc5f5..3de599cf0779a 100644 +--- a/Documentation/admin-guide/cifs/usage.rst ++++ b/Documentation/admin-guide/cifs/usage.rst +@@ -741,7 +741,7 @@ SecurityFlags Flags which control security negotiation and + may use NTLMSSP 0x00080 + must use NTLMSSP 0x80080 + seal (packet encryption) 0x00040 +- must seal (not implemented yet) 0x40040 ++ must seal 0x40040 + + cifsFYI If set to non-zero value, additional debug information + will be logged to the system error log. 
This field +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index 8d2f9ed3f1076..a7fe113897361 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -664,12 +664,6 @@ + loops can be debugged more effectively on production + systems. + +- clocksource.max_cswd_read_retries= [KNL] +- Number of clocksource_watchdog() retries due to +- external delays before the clock will be marked +- unstable. Defaults to two retries, that is, +- three attempts to read the clock under test. +- + clocksource.verify_n_cpus= [KNL] + Limit the number of CPUs checked for clocksources + marked with CLOCK_SOURCE_VERIFY_PERCPU that +@@ -4655,11 +4649,9 @@ + + profile= [KNL] Enable kernel profiling via /proc/profile + Format: [,] +- Param: : "schedule", "sleep", or "kvm" ++ Param: : "schedule" or "kvm" + [defaults to kernel profiling] + Param: "schedule" - profile schedule points. +- Param: "sleep" - profile D-state sleeping (millisecs). +- Requires CONFIG_SCHEDSTATS + Param: "kvm" - profile VM exits. + Param: - step/bucket size as a power of 2 for + statistical time based profiling. +diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst +index 29fd5213eeb2b..357d6cb98161f 100644 +--- a/Documentation/arch/arm64/silicon-errata.rst ++++ b/Documentation/arch/arm64/silicon-errata.rst +@@ -119,32 +119,68 @@ stable kernels. + +----------------+-----------------+-----------------+-----------------------------+ + | ARM | Cortex-A76 | #1463225 | ARM64_ERRATUM_1463225 | + +----------------+-----------------+-----------------+-----------------------------+ ++| ARM | Cortex-A76 | #3324349 | ARM64_ERRATUM_3194386 | +++----------------+-----------------+-----------------+-----------------------------+ + | ARM | Cortex-A77 | #1508412 | ARM64_ERRATUM_1508412 | + +----------------+-----------------+-----------------+-----------------------------+ ++| ARM | Cortex-A77 | #3324348 | ARM64_ERRATUM_3194386 | +++----------------+-----------------+-----------------+-----------------------------+ ++| ARM | Cortex-A78 | #3324344 | ARM64_ERRATUM_3194386 | +++----------------+-----------------+-----------------+-----------------------------+ ++| ARM | Cortex-A78C | #3324346,3324347| ARM64_ERRATUM_3194386 | +++----------------+-----------------+-----------------+-----------------------------+ + | ARM | Cortex-A710 | #2119858 | ARM64_ERRATUM_2119858 | + +----------------+-----------------+-----------------+-----------------------------+ + | ARM | Cortex-A710 | #2054223 | ARM64_ERRATUM_2054223 | + +----------------+-----------------+-----------------+-----------------------------+ + | ARM | Cortex-A710 | #2224489 | ARM64_ERRATUM_2224489 | + +----------------+-----------------+-----------------+-----------------------------+ ++| ARM | Cortex-A710 | #3324338 | ARM64_ERRATUM_3194386 | +++----------------+-----------------+-----------------+-----------------------------+ + | ARM | Cortex-A715 | #2645198 | ARM64_ERRATUM_2645198 | + +----------------+-----------------+-----------------+-----------------------------+ ++| ARM | Cortex-A720 | #3456091 | ARM64_ERRATUM_3194386 | +++----------------+-----------------+-----------------+-----------------------------+ ++| ARM | Cortex-A725 | #3456106 | ARM64_ERRATUM_3194386 | +++----------------+-----------------+-----------------+-----------------------------+ ++| ARM | Cortex-X1 | #3324344 | ARM64_ERRATUM_3194386 | 
+++----------------+-----------------+-----------------+-----------------------------+ ++| ARM | Cortex-X1C | #3324346 | ARM64_ERRATUM_3194386 | +++----------------+-----------------+-----------------+-----------------------------+ + | ARM | Cortex-X2 | #2119858 | ARM64_ERRATUM_2119858 | + +----------------+-----------------+-----------------+-----------------------------+ + | ARM | Cortex-X2 | #2224489 | ARM64_ERRATUM_2224489 | + +----------------+-----------------+-----------------+-----------------------------+ ++| ARM | Cortex-X2 | #3324338 | ARM64_ERRATUM_3194386 | +++----------------+-----------------+-----------------+-----------------------------+ ++| ARM | Cortex-X3 | #3324335 | ARM64_ERRATUM_3194386 | +++----------------+-----------------+-----------------+-----------------------------+ ++| ARM | Cortex-X4 | #3194386 | ARM64_ERRATUM_3194386 | +++----------------+-----------------+-----------------+-----------------------------+ ++| ARM | Cortex-X925 | #3324334 | ARM64_ERRATUM_3194386 | +++----------------+-----------------+-----------------+-----------------------------+ + | ARM | Neoverse-N1 | #1188873,1418040| ARM64_ERRATUM_1418040 | + +----------------+-----------------+-----------------+-----------------------------+ + | ARM | Neoverse-N1 | #1349291 | N/A | + +----------------+-----------------+-----------------+-----------------------------+ + | ARM | Neoverse-N1 | #1542419 | ARM64_ERRATUM_1542419 | + +----------------+-----------------+-----------------+-----------------------------+ ++| ARM | Neoverse-N1 | #3324349 | ARM64_ERRATUM_3194386 | +++----------------+-----------------+-----------------+-----------------------------+ + | ARM | Neoverse-N2 | #2139208 | ARM64_ERRATUM_2139208 | + +----------------+-----------------+-----------------+-----------------------------+ + | ARM | Neoverse-N2 | #2067961 | ARM64_ERRATUM_2067961 | + +----------------+-----------------+-----------------+-----------------------------+ + | ARM | Neoverse-N2 | #2253138 | ARM64_ERRATUM_2253138 | + +----------------+-----------------+-----------------+-----------------------------+ ++| ARM | Neoverse-N2 | #3324339 | ARM64_ERRATUM_3194386 | +++----------------+-----------------+-----------------+-----------------------------+ ++| ARM | Neoverse-V1 | #3324341 | ARM64_ERRATUM_3194386 | +++----------------+-----------------+-----------------+-----------------------------+ ++| ARM | Neoverse-V2 | #3324336 | ARM64_ERRATUM_3194386 | +++----------------+-----------------+-----------------+-----------------------------+ ++| ARM | Neoverse-V3 | #3312417 | ARM64_ERRATUM_3194386 | +++----------------+-----------------+-----------------+-----------------------------+ + | ARM | MMU-500 | #841119,826419 | N/A | + +----------------+-----------------+-----------------+-----------------------------+ + | ARM | MMU-600 | #1076982,1209401| N/A | +diff --git a/Documentation/hwmon/corsair-psu.rst b/Documentation/hwmon/corsair-psu.rst +index 16db34d464dd6..7ed794087f848 100644 +--- a/Documentation/hwmon/corsair-psu.rst ++++ b/Documentation/hwmon/corsair-psu.rst +@@ -15,11 +15,11 @@ Supported devices: + + Corsair HX850i + +- Corsair HX1000i (Series 2022 and 2023) ++ Corsair HX1000i (Legacy and Series 2023) + +- Corsair HX1200i ++ Corsair HX1200i (Legacy and Series 2023) + +- Corsair HX1500i (Series 2022 and 2023) ++ Corsair HX1500i (Legacy and Series 2023) + + Corsair RM550i + +diff --git a/Makefile b/Makefile +index 0bd4bee2128b4..77de99984c2f1 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # 
SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 6
+-SUBLEVEL = 45
++SUBLEVEL = 46
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index f9777ce2ccb2d..9e0c1ac3d13ee 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -1068,6 +1068,44 @@ config ARM64_ERRATUM_3117295
+
+ If unsure, say Y.
+
++config ARM64_ERRATUM_3194386
++ bool "Cortex-*/Neoverse-*: workaround for MSR SSBS not self-synchronizing"
++ default y
++ help
++ This option adds the workaround for the following errata:
++
++ * ARM Cortex-A76 erratum 3324349
++ * ARM Cortex-A77 erratum 3324348
++ * ARM Cortex-A78 erratum 3324344
++ * ARM Cortex-A78C erratum 3324346
++ * ARM Cortex-A78C erratum 3324347
++ * ARM Cortex-A710 erratum 3324338
++ * ARM Cortex-A720 erratum 3456091
++ * ARM Cortex-A725 erratum 3456106
++ * ARM Cortex-X1 erratum 3324344
++ * ARM Cortex-X1C erratum 3324346
++ * ARM Cortex-X2 erratum 3324338
++ * ARM Cortex-X3 erratum 3324335
++ * ARM Cortex-X4 erratum 3194386
++ * ARM Cortex-X925 erratum 3324334
++ * ARM Neoverse-N1 erratum 3324349
++ * ARM Neoverse N2 erratum 3324339
++ * ARM Neoverse-V1 erratum 3324341
++ * ARM Neoverse V2 erratum 3324336
++ * ARM Neoverse-V3 erratum 3312417
++
++ On affected cores "MSR SSBS, #0" instructions may not affect
++ subsequent speculative instructions, which may permit unexpected
++ speculative store bypassing.
++
++ Work around this problem by placing a Speculation Barrier (SB) or
++ Instruction Synchronization Barrier (ISB) after kernel changes to
++ SSBS. The presence of the SSBS special-purpose register is hidden
++ from hwcaps and EL0 reads of ID_AA64PFR1_EL1, such that userspace
++ will use the PR_SPEC_STORE_BYPASS prctl to change SSBS.
++
++ If unsure, say Y.
++ + config CAVIUM_ERRATUM_22375 + bool "Cavium erratum 22375, 24313" + default y +diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h +index cf2987464c186..1ca947d5c9396 100644 +--- a/arch/arm64/include/asm/barrier.h ++++ b/arch/arm64/include/asm/barrier.h +@@ -40,6 +40,10 @@ + */ + #define dgh() asm volatile("hint #6" : : : "memory") + ++#define spec_bar() asm volatile(ALTERNATIVE("dsb nsh\nisb\n", \ ++ SB_BARRIER_INSN"nop\n", \ ++ ARM64_HAS_SB)) ++ + #ifdef CONFIG_ARM64_PSEUDO_NMI + #define pmr_sync() \ + do { \ +diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h +index 52f076afeb960..5fd7caea44193 100644 +--- a/arch/arm64/include/asm/cputype.h ++++ b/arch/arm64/include/asm/cputype.h +@@ -86,6 +86,14 @@ + #define ARM_CPU_PART_CORTEX_X2 0xD48 + #define ARM_CPU_PART_NEOVERSE_N2 0xD49 + #define ARM_CPU_PART_CORTEX_A78C 0xD4B ++#define ARM_CPU_PART_CORTEX_X1C 0xD4C ++#define ARM_CPU_PART_CORTEX_X3 0xD4E ++#define ARM_CPU_PART_NEOVERSE_V2 0xD4F ++#define ARM_CPU_PART_CORTEX_A720 0xD81 ++#define ARM_CPU_PART_CORTEX_X4 0xD82 ++#define ARM_CPU_PART_NEOVERSE_V3 0xD84 ++#define ARM_CPU_PART_CORTEX_X925 0xD85 ++#define ARM_CPU_PART_CORTEX_A725 0xD87 + + #define APM_CPU_PART_XGENE 0x000 + #define APM_CPU_VAR_POTENZA 0x00 +@@ -159,6 +167,14 @@ + #define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2) + #define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2) + #define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C) ++#define MIDR_CORTEX_X1C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1C) ++#define MIDR_CORTEX_X3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X3) ++#define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2) ++#define MIDR_CORTEX_A720 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720) ++#define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4) ++#define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3) ++#define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925) ++#define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725) + #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) + #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) + #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) +diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c +index 7bba831f62c33..57b1d6a68256b 100644 +--- a/arch/arm64/kernel/cpu_errata.c ++++ b/arch/arm64/kernel/cpu_errata.c +@@ -448,6 +448,30 @@ static const struct midr_range erratum_spec_unpriv_load_list[] = { + }; + #endif + ++#ifdef CONFIG_ARM64_ERRATUM_3194386 ++static const struct midr_range erratum_spec_ssbs_list[] = { ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A76), ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A77), ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78), ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C), ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A720), ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A725), ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_X1), ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C), ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_X2), ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_X3), ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_X4), ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_X925), ++ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1), ++ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), ++ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1), ++ 
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2), ++ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3), ++ {} ++}; ++#endif ++ + const struct arm64_cpu_capabilities arm64_errata[] = { + #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE + { +@@ -746,6 +770,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = { + .cpu_enable = cpu_clear_bf16_from_user_emulation, + }, + #endif ++#ifdef CONFIG_ARM64_ERRATUM_3194386 ++ { ++ .desc = "SSBS not fully self-synchronizing", ++ .capability = ARM64_WORKAROUND_SPECULATIVE_SSBS, ++ ERRATA_MIDR_RANGE_LIST(erratum_spec_ssbs_list), ++ }, ++#endif + #ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD + { + .desc = "ARM errata 2966298, 3117295", +diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c +index 444a73c2e6385..7e96604559004 100644 +--- a/arch/arm64/kernel/cpufeature.c ++++ b/arch/arm64/kernel/cpufeature.c +@@ -2190,6 +2190,17 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap) + } + #endif /* CONFIG_ARM64_MTE */ + ++static void user_feature_fixup(void) ++{ ++ if (cpus_have_cap(ARM64_WORKAROUND_SPECULATIVE_SSBS)) { ++ struct arm64_ftr_reg *regp; ++ ++ regp = get_arm64_ftr_reg(SYS_ID_AA64PFR1_EL1); ++ if (regp) ++ regp->user_mask &= ~ID_AA64PFR1_EL1_SSBS_MASK; ++ } ++} ++ + static void elf_hwcap_fixup(void) + { + #ifdef CONFIG_ARM64_ERRATUM_1742098 +@@ -3345,6 +3356,7 @@ void __init setup_cpu_features(void) + u32 cwg; + + setup_system_capabilities(); ++ user_feature_fixup(); + setup_elf_hwcaps(arm64_elf_hwcaps); + + if (system_supports_32bit_el0()) { +diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c +index 05f40c4e18fda..57503dc4b22fa 100644 +--- a/arch/arm64/kernel/proton-pack.c ++++ b/arch/arm64/kernel/proton-pack.c +@@ -558,6 +558,18 @@ static enum mitigation_state spectre_v4_enable_hw_mitigation(void) + + /* SCTLR_EL1.DSSBS was initialised to 0 during boot */ + set_pstate_ssbs(0); ++ ++ /* ++ * SSBS is self-synchronizing and is intended to affect subsequent ++ * speculative instructions, but some CPUs can speculate with a stale ++ * value of SSBS. ++ * ++ * Mitigate this with an unconditional speculation barrier, as CPUs ++ * could mis-speculate branches and bypass a conditional barrier. 
++ */ ++ if (IS_ENABLED(CONFIG_ARM64_ERRATUM_3194386)) ++ spec_bar(); ++ + return SPECTRE_MITIGATED; + } + +diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps +index 5511bee15603a..c251ef3caae56 100644 +--- a/arch/arm64/tools/cpucaps ++++ b/arch/arm64/tools/cpucaps +@@ -99,4 +99,5 @@ WORKAROUND_NVIDIA_CARMEL_CNP + WORKAROUND_QCOM_FALKOR_E1003 + WORKAROUND_REPEAT_TLBI + WORKAROUND_SPECULATIVE_AT ++WORKAROUND_SPECULATIVE_SSBS + WORKAROUND_SPECULATIVE_UNPRIV_LOAD +diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c +index 9fc10cea21e10..de4f3def4af0b 100644 +--- a/arch/loongarch/kernel/efi.c ++++ b/arch/loongarch/kernel/efi.c +@@ -66,6 +66,12 @@ void __init efi_runtime_init(void) + set_bit(EFI_RUNTIME_SERVICES, &efi.flags); + } + ++bool efi_poweroff_required(void) ++{ ++ return efi_enabled(EFI_RUNTIME_SERVICES) && ++ (acpi_gbl_reduced_hardware || acpi_no_s5); ++} ++ + unsigned long __initdata screen_info_table = EFI_INVALID_TABLE_ADDR; + + static void __init init_screen_info(void) +diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig +index 2834a64064970..a077e6bf9475f 100644 +--- a/arch/parisc/Kconfig ++++ b/arch/parisc/Kconfig +@@ -18,6 +18,7 @@ config PARISC + select ARCH_SUPPORTS_HUGETLBFS if PA20 + select ARCH_SUPPORTS_MEMORY_FAILURE + select ARCH_STACKWALK ++ select ARCH_HAS_CACHE_LINE_SIZE + select ARCH_HAS_DEBUG_VM_PGTABLE + select HAVE_RELIABLE_STACKTRACE + select DMA_OPS +diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h +index 2a60d7a72f1fa..a3f0f100f2194 100644 +--- a/arch/parisc/include/asm/cache.h ++++ b/arch/parisc/include/asm/cache.h +@@ -20,7 +20,16 @@ + + #define SMP_CACHE_BYTES L1_CACHE_BYTES + +-#define ARCH_DMA_MINALIGN L1_CACHE_BYTES ++#ifdef CONFIG_PA20 ++#define ARCH_DMA_MINALIGN 128 ++#else ++#define ARCH_DMA_MINALIGN 32 ++#endif ++#define ARCH_KMALLOC_MINALIGN 16 /* ldcw requires 16-byte alignment */ ++ ++#define arch_slab_minalign() ((unsigned)dcache_stride) ++#define cache_line_size() dcache_stride ++#define dma_get_cache_alignment cache_line_size + + #define __read_mostly __section(".data..read_mostly") + +diff --git a/arch/parisc/net/bpf_jit_core.c b/arch/parisc/net/bpf_jit_core.c +index d6ee2fd455503..7b9cb3cda27ee 100644 +--- a/arch/parisc/net/bpf_jit_core.c ++++ b/arch/parisc/net/bpf_jit_core.c +@@ -114,7 +114,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) + jit_data->header = + bpf_jit_binary_alloc(prog_size + extable_size, + &jit_data->image, +- sizeof(u32), ++ sizeof(long), + bpf_fill_ill_insns); + if (!jit_data->header) { + prog = orig_prog; +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h +index 621bac6b74011..24b7bd255e983 100644 +--- a/arch/x86/include/asm/msr-index.h ++++ b/arch/x86/include/asm/msr-index.h +@@ -237,6 +237,7 @@ + #define MSR_INTEGRITY_CAPS_ARRAY_BIST BIT(MSR_INTEGRITY_CAPS_ARRAY_BIST_BIT) + #define MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT 4 + #define MSR_INTEGRITY_CAPS_PERIODIC_BIST BIT(MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT) ++#define MSR_INTEGRITY_CAPS_SAF_GEN_MASK GENMASK_ULL(10, 9) + + #define MSR_LBR_NHM_FROM 0x00000680 + #define MSR_LBR_NHM_TO 0x000006c0 +diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h +index cde8357bb226d..e897046c5d2c6 100644 +--- a/arch/x86/include/asm/qspinlock.h ++++ b/arch/x86/include/asm/qspinlock.h +@@ -66,13 +66,15 @@ static inline bool vcpu_is_preempted(long cpu) + + #ifdef CONFIG_PARAVIRT + /* +- * virt_spin_lock_key - enables (by default) the virt_spin_lock() 
hijack. ++ * virt_spin_lock_key - disables by default the virt_spin_lock() hijack. + * +- * Native (and PV wanting native due to vCPU pinning) should disable this key. +- * It is done in this backwards fashion to only have a single direction change, +- * which removes ordering between native_pv_spin_init() and HV setup. ++ * Native (and PV wanting native due to vCPU pinning) should keep this key ++ * disabled. Native does not touch the key. ++ * ++ * When in a guest then native_pv_lock_init() enables the key first and ++ * KVM/XEN might conditionally disable it later in the boot process again. + */ +-DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key); ++DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key); + + /* + * Shortcut for the queued_spin_lock_slowpath() function that allows +diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c +index 767bf1c71aadd..2a2fc14955cd3 100644 +--- a/arch/x86/kernel/cpu/mtrr/mtrr.c ++++ b/arch/x86/kernel/cpu/mtrr/mtrr.c +@@ -609,7 +609,7 @@ void mtrr_save_state(void) + { + int first_cpu; + +- if (!mtrr_enabled()) ++ if (!mtrr_enabled() || !mtrr_state.have_fixed) + return; + + first_cpu = cpumask_first(cpu_online_mask); +diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c +index 97f1436c1a203..8d51c86caa415 100644 +--- a/arch/x86/kernel/paravirt.c ++++ b/arch/x86/kernel/paravirt.c +@@ -71,13 +71,12 @@ DEFINE_PARAVIRT_ASM(pv_native_irq_enable, "sti", .noinstr.text); + DEFINE_PARAVIRT_ASM(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text); + #endif + +-DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key); ++DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key); + + void __init native_pv_lock_init(void) + { +- if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) && +- !boot_cpu_has(X86_FEATURE_HYPERVISOR)) +- static_branch_disable(&virt_spin_lock_key); ++ if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) ++ static_branch_enable(&virt_spin_lock_key); + } + + static void native_tlb_remove_table(struct mmu_gather *tlb, void *table) +diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c +index 51b6b78e6b175..41d8c8f475a7c 100644 +--- a/arch/x86/mm/pti.c ++++ b/arch/x86/mm/pti.c +@@ -374,14 +374,14 @@ pti_clone_pgtable(unsigned long start, unsigned long end, + */ + *target_pmd = *pmd; + +- addr += PMD_SIZE; ++ addr = round_up(addr + 1, PMD_SIZE); + + } else if (level == PTI_CLONE_PTE) { + + /* Walk the page-table down to the pte level */ + pte = pte_offset_kernel(pmd, addr); + if (pte_none(*pte)) { +- addr += PAGE_SIZE; ++ addr = round_up(addr + 1, PAGE_SIZE); + continue; + } + +@@ -401,7 +401,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end, + /* Clone the PTE */ + *target_pte = *pte; + +- addr += PAGE_SIZE; ++ addr = round_up(addr + 1, PAGE_SIZE); + + } else { + BUG(); +@@ -496,7 +496,7 @@ static void pti_clone_entry_text(void) + { + pti_clone_pgtable((unsigned long) __entry_text_start, + (unsigned long) __entry_text_end, +- PTI_CLONE_PMD); ++ PTI_LEVEL_KERNEL_IMAGE); + } + + /* +diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c +index 969bf81e8d546..7f7ad94f22b91 100644 +--- a/drivers/acpi/battery.c ++++ b/drivers/acpi/battery.c +@@ -678,12 +678,18 @@ static ssize_t acpi_battery_alarm_store(struct device *dev, + return count; + } + +-static const struct device_attribute alarm_attr = { ++static struct device_attribute alarm_attr = { + .attr = {.name = "alarm", .mode = 0644}, + .show = acpi_battery_alarm_show, + .store = acpi_battery_alarm_store, + }; + ++static struct attribute *acpi_battery_attrs[] = { ++ &alarm_attr.attr, ++ NULL ++}; 
++ATTRIBUTE_GROUPS(acpi_battery); ++ + /* + * The Battery Hooking API + * +@@ -823,7 +829,10 @@ static void __exit battery_hook_exit(void) + + static int sysfs_add_battery(struct acpi_battery *battery) + { +- struct power_supply_config psy_cfg = { .drv_data = battery, }; ++ struct power_supply_config psy_cfg = { ++ .drv_data = battery, ++ .attr_grp = acpi_battery_groups, ++ }; + bool full_cap_broken = false; + + if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) && +@@ -868,7 +877,7 @@ static int sysfs_add_battery(struct acpi_battery *battery) + return result; + } + battery_hook_add_battery(battery); +- return device_create_file(&battery->bat->dev, &alarm_attr); ++ return 0; + } + + static void sysfs_remove_battery(struct acpi_battery *battery) +@@ -879,7 +888,6 @@ static void sysfs_remove_battery(struct acpi_battery *battery) + return; + } + battery_hook_remove_battery(battery); +- device_remove_file(&battery->bat->dev, &alarm_attr); + power_supply_unregister(battery->bat); + battery->bat = NULL; + mutex_unlock(&battery->sysfs_lock); +diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c +index 94e3c000df2e1..fdeb46ed21d69 100644 +--- a/drivers/acpi/sbs.c ++++ b/drivers/acpi/sbs.c +@@ -77,7 +77,6 @@ struct acpi_battery { + u16 spec; + u8 id; + u8 present:1; +- u8 have_sysfs_alarm:1; + }; + + #define to_acpi_battery(x) power_supply_get_drvdata(x) +@@ -462,12 +461,18 @@ static ssize_t acpi_battery_alarm_store(struct device *dev, + return count; + } + +-static const struct device_attribute alarm_attr = { ++static struct device_attribute alarm_attr = { + .attr = {.name = "alarm", .mode = 0644}, + .show = acpi_battery_alarm_show, + .store = acpi_battery_alarm_store, + }; + ++static struct attribute *acpi_battery_attrs[] = { ++ &alarm_attr.attr, ++ NULL ++}; ++ATTRIBUTE_GROUPS(acpi_battery); ++ + /* -------------------------------------------------------------------------- + Driver Interface + -------------------------------------------------------------------------- */ +@@ -518,7 +523,10 @@ static int acpi_battery_read(struct acpi_battery *battery) + static int acpi_battery_add(struct acpi_sbs *sbs, int id) + { + struct acpi_battery *battery = &sbs->battery[id]; +- struct power_supply_config psy_cfg = { .drv_data = battery, }; ++ struct power_supply_config psy_cfg = { ++ .drv_data = battery, ++ .attr_grp = acpi_battery_groups, ++ }; + int result; + + battery->id = id; +@@ -548,10 +556,6 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id) + goto end; + } + +- result = device_create_file(&battery->bat->dev, &alarm_attr); +- if (result) +- goto end; +- battery->have_sysfs_alarm = 1; + end: + pr_info("%s [%s]: Battery Slot [%s] (battery %s)\n", + ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device), +@@ -563,11 +567,8 @@ static void acpi_battery_remove(struct acpi_sbs *sbs, int id) + { + struct acpi_battery *battery = &sbs->battery[id]; + +- if (battery->bat) { +- if (battery->have_sysfs_alarm) +- device_remove_file(&battery->bat->dev, &alarm_attr); ++ if (battery->bat) + power_supply_unregister(battery->bat); +- } + } + + static int acpi_charger_add(struct acpi_sbs *sbs) +diff --git a/drivers/base/core.c b/drivers/base/core.c +index aeb4644817d57..cb323700e952f 100644 +--- a/drivers/base/core.c ++++ b/drivers/base/core.c +@@ -25,6 +25,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -2565,6 +2566,7 @@ static const char *dev_uevent_name(const struct kobject *kobj) + static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env 
*env) + { + const struct device *dev = kobj_to_dev(kobj); ++ struct device_driver *driver; + int retval = 0; + + /* add device node properties if present */ +@@ -2593,8 +2595,12 @@ static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) + if (dev->type && dev->type->name) + add_uevent_var(env, "DEVTYPE=%s", dev->type->name); + +- if (dev->driver) +- add_uevent_var(env, "DRIVER=%s", dev->driver->name); ++ /* Synchronize with module_remove_driver() */ ++ rcu_read_lock(); ++ driver = READ_ONCE(dev->driver); ++ if (driver) ++ add_uevent_var(env, "DRIVER=%s", driver->name); ++ rcu_read_unlock(); + + /* Add common DT information about the device */ + of_device_uevent(dev, env); +@@ -2664,11 +2670,8 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr, + if (!env) + return -ENOMEM; + +- /* Synchronize with really_probe() */ +- device_lock(dev); + /* let the kset specific function add its keys */ + retval = kset->uevent_ops->uevent(&dev->kobj, env); +- device_unlock(dev); + if (retval) + goto out; + +diff --git a/drivers/base/module.c b/drivers/base/module.c +index a1b55da07127d..b0b79b9c189d4 100644 +--- a/drivers/base/module.c ++++ b/drivers/base/module.c +@@ -7,6 +7,7 @@ + #include + #include + #include ++#include + #include "base.h" + + static char *make_driver_name(struct device_driver *drv) +@@ -97,6 +98,9 @@ void module_remove_driver(struct device_driver *drv) + if (!drv) + return; + ++ /* Synchronize with dev_uevent() */ ++ synchronize_rcu(); ++ + sysfs_remove_link(&drv->p->kobj, "module"); + + if (drv->owner) +diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c +index 83e8e27a5ecec..b5d40e0e05f31 100644 +--- a/drivers/bluetooth/btnxpuart.c ++++ b/drivers/bluetooth/btnxpuart.c +@@ -340,7 +340,7 @@ static void ps_cancel_timer(struct btnxpuart_dev *nxpdev) + struct ps_data *psdata = &nxpdev->psdata; + + flush_work(&psdata->work); +- del_timer_sync(&psdata->ps_timer); ++ timer_shutdown_sync(&psdata->ps_timer); + } + + static void ps_control(struct hci_dev *hdev, u8 ps_state) +diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c +index 26919556ef5f0..b72b36e0abed8 100644 +--- a/drivers/clocksource/sh_cmt.c ++++ b/drivers/clocksource/sh_cmt.c +@@ -528,6 +528,7 @@ static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta) + static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id) + { + struct sh_cmt_channel *ch = dev_id; ++ unsigned long flags; + + /* clear flags */ + sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) & +@@ -558,6 +559,8 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id) + + ch->flags &= ~FLAG_SKIPEVENT; + ++ raw_spin_lock_irqsave(&ch->lock, flags); ++ + if (ch->flags & FLAG_REPROGRAM) { + ch->flags &= ~FLAG_REPROGRAM; + sh_cmt_clock_event_program_verify(ch, 1); +@@ -570,6 +573,8 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id) + + ch->flags &= ~FLAG_IRQCONTEXT; + ++ raw_spin_unlock_irqrestore(&ch->lock, flags); ++ + return IRQ_HANDLED; + } + +@@ -780,12 +785,18 @@ static int sh_cmt_clock_event_next(unsigned long delta, + struct clock_event_device *ced) + { + struct sh_cmt_channel *ch = ced_to_sh_cmt(ced); ++ unsigned long flags; + + BUG_ON(!clockevent_state_oneshot(ced)); ++ ++ raw_spin_lock_irqsave(&ch->lock, flags); ++ + if (likely(ch->flags & FLAG_IRQCONTEXT)) + ch->next_match_value = delta - 1; + else +- sh_cmt_set_next(ch, delta - 1); ++ __sh_cmt_set_next(ch, delta - 1); ++ ++ raw_spin_unlock_irqrestore(&ch->lock, flags); + + return 0; + } +diff 
--git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c +index 1c512ed3fa6d9..5c0016c77d2ab 100644 +--- a/drivers/gpio/gpiolib.c ++++ b/drivers/gpio/gpiolib.c +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -164,7 +165,7 @@ struct gpio_desc *gpiochip_get_desc(struct gpio_chip *gc, + if (hwnum >= gdev->ngpio) + return ERR_PTR(-EINVAL); + +- return &gdev->descs[hwnum]; ++ return &gdev->descs[array_index_nospec(hwnum, gdev->ngpio)]; + } + EXPORT_SYMBOL_GPL(gpiochip_get_desc); + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +index ea1bce13db941..eb663eb811563 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +@@ -3561,6 +3561,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, + mutex_init(&adev->grbm_idx_mutex); + mutex_init(&adev->mn_lock); + mutex_init(&adev->virt.vf_errors.lock); ++ mutex_init(&adev->virt.rlcg_reg_lock); + hash_init(adev->mn_hash); + mutex_init(&adev->psp.mutex); + mutex_init(&adev->notifier_lock); +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +index de9d7f3dc2336..99dd86337e841 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +@@ -258,9 +258,8 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job, + struct dma_fence *fence = NULL; + int r; + +- /* Ignore soft recovered fences here */ + r = drm_sched_entity_error(s_entity); +- if (r && r != -ENODATA) ++ if (r) + goto error; + + if (!fence && job->gang_submit) +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c +index ca5c86e5f7cd6..8e8afbd237bcd 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c +@@ -334,7 +334,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size + + set_ta_context_funcs(psp, ta_type, &context); + +- if (!context->initialized) { ++ if (!context || !context->initialized) { + dev_err(adev->dev, "TA is not initialized\n"); + ret = -EINVAL; + goto err_free_shared_buf; +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +index 67b75ff0f7c37..7cba98f8bbdca 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +@@ -1780,12 +1780,15 @@ static void amdgpu_ras_interrupt_process_handler(struct work_struct *work) + int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev, + struct ras_dispatch_if *info) + { +- struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); +- struct ras_ih_data *data = &obj->ih_data; ++ struct ras_manager *obj; ++ struct ras_ih_data *data; + ++ obj = amdgpu_ras_find_obj(adev, &info->head); + if (!obj) + return -EINVAL; + ++ data = &obj->ih_data; ++ + if (data->inuse == 0) + return 0; + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +index 96857ae7fb5bc..ff4f52e07cc0d 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +@@ -1003,6 +1003,9 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v + scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1; + scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2; + scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3; ++ ++ 
mutex_lock(&adev->virt.rlcg_reg_lock); ++ + if (reg_access_ctrl->spare_int) + spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int; + +@@ -1058,6 +1061,9 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v + } + + ret = readl(scratch_reg0); ++ ++ mutex_unlock(&adev->virt.rlcg_reg_lock); ++ + return ret; + } + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +index fabb83e9d9aec..23b6efa9d25df 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +@@ -263,6 +263,8 @@ struct amdgpu_virt { + + /* the ucode id to signal the autoload */ + uint32_t autoload_ucode_id; ++ ++ struct mutex rlcg_reg_lock; + }; + + struct amdgpu_video_codec_info; +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c +index 349416e176a12..1cf1498204678 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c +@@ -102,6 +102,11 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p, + if (!r) + r = amdgpu_sync_push_to_job(&sync, p->job); + amdgpu_sync_free(&sync); ++ ++ if (r) { ++ p->num_dw_left = 0; ++ amdgpu_job_free(p->job); ++ } + return r; + } + +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +index 60db3800666ec..94059aef762be 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +@@ -2628,7 +2628,8 @@ static int dm_suspend(void *handle) + + dm->cached_dc_state = dc_copy_state(dm->dc->current_state); + +- dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false); ++ if (dm->cached_dc_state) ++ dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false); + + amdgpu_dm_commit_zero_streams(dm->dc); + +@@ -6483,7 +6484,8 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector) + aconnector->dc_sink = aconnector->dc_link->local_sink ? 
+ aconnector->dc_link->local_sink : + aconnector->dc_em_sink; +- dc_sink_retain(aconnector->dc_sink); ++ if (aconnector->dc_sink) ++ dc_sink_retain(aconnector->dc_sink); + } + } + +@@ -7296,7 +7298,8 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) + drm_add_modes_noedid(connector, 1920, 1080); + } else { + amdgpu_dm_connector_ddc_get_modes(connector, edid); +- amdgpu_dm_connector_add_common_modes(encoder, connector); ++ if (encoder) ++ amdgpu_dm_connector_add_common_modes(encoder, connector); + amdgpu_dm_connector_add_freesync_modes(connector, edid); + } + amdgpu_dm_fbc_init(connector); +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +index 2104511f3b863..3880ddf1c820f 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +@@ -1266,6 +1266,9 @@ static bool is_dsc_need_re_compute( + } + } + ++ if (new_stream_on_link_num == 0) ++ return false; ++ + /* check current_state if there stream on link but it is not in + * new request state + */ +diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c +index b621b97711b61..a7f5b0f6272ce 100644 +--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c ++++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c +@@ -162,7 +162,12 @@ static void set_hpo_fixed_vs_pe_retimer_dp_link_test_pattern(struct dc_link *lin + link_res->hpo_dp_link_enc->funcs->set_link_test_pattern( + link_res->hpo_dp_link_enc, tp_params); + } ++ + link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN); ++ ++ // Give retimer extra time to lock before updating DP_TRAINING_PATTERN_SET to TPS1 ++ if (tp_params->dp_phy_pattern == DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE) ++ msleep(30); + } + + static void set_hpo_fixed_vs_pe_retimer_dp_lane_settings(struct dc_link *link, +diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +index 9e4f8a4104a34..7bf46e4974f88 100644 +--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c ++++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +@@ -927,7 +927,7 @@ static int pp_dpm_switch_power_profile(void *handle, + enum PP_SMC_POWER_PROFILE type, bool en) + { + struct pp_hwmgr *hwmgr = handle; +- long workload; ++ long workload[1]; + uint32_t index; + + if (!hwmgr || !hwmgr->pm_en) +@@ -945,12 +945,12 @@ static int pp_dpm_switch_power_profile(void *handle, + hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]); + index = fls(hwmgr->workload_mask); + index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0; +- workload = hwmgr->workload_setting[index]; ++ workload[0] = hwmgr->workload_setting[index]; + } else { + hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]); + index = fls(hwmgr->workload_mask); + index = index <= Workload_Policy_Max ? 
index - 1 : 0; +- workload = hwmgr->workload_setting[index]; ++ workload[0] = hwmgr->workload_setting[index]; + } + + if (type == PP_SMC_POWER_PROFILE_COMPUTE && +@@ -960,7 +960,7 @@ static int pp_dpm_switch_power_profile(void *handle, + } + + if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) +- hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0); ++ hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0); + + return 0; + } +diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c +index 1d829402cd2e2..f4bd8e9357e22 100644 +--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c ++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c +@@ -269,7 +269,7 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_set + struct pp_power_state *new_ps) + { + uint32_t index; +- long workload; ++ long workload[1]; + + if (hwmgr->not_vf) { + if (!skip_display_settings) +@@ -294,10 +294,10 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_set + if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { + index = fls(hwmgr->workload_mask); + index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0; +- workload = hwmgr->workload_setting[index]; ++ workload[0] = hwmgr->workload_setting[index]; + +- if (hwmgr->power_profile_mode != workload && hwmgr->hwmgr_func->set_power_profile_mode) +- hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0); ++ if (hwmgr->power_profile_mode != workload[0] && hwmgr->hwmgr_func->set_power_profile_mode) ++ hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0); + } + + return 0; +diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +index aa91730e4eaff..163864bd51c34 100644 +--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c ++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +@@ -2957,6 +2957,7 @@ static int smu7_update_edc_leakage_table(struct pp_hwmgr *hwmgr) + + static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) + { ++ struct amdgpu_device *adev = hwmgr->adev; + struct smu7_hwmgr *data; + int result = 0; + +@@ -2993,40 +2994,37 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) + /* Initalize Dynamic State Adjustment Rule Settings */ + result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr); + +- if (0 == result) { +- struct amdgpu_device *adev = hwmgr->adev; ++ if (result) ++ goto fail; + +- data->is_tlu_enabled = false; ++ data->is_tlu_enabled = false; + +- hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = ++ hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = + SMU7_MAX_HARDWARE_POWERLEVELS; +- hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; +- hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; ++ hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; ++ hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; + +- data->pcie_gen_cap = adev->pm.pcie_gen_mask; +- if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) +- data->pcie_spc_cap = 20; +- else +- data->pcie_spc_cap = 16; +- data->pcie_lane_cap = adev->pm.pcie_mlw_mask; +- +- hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */ +-/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. 
*/ +- hwmgr->platform_descriptor.clockStep.engineClock = 500; +- hwmgr->platform_descriptor.clockStep.memoryClock = 500; +- smu7_thermal_parameter_init(hwmgr); +- } else { +- /* Ignore return value in here, we are cleaning up a mess. */ +- smu7_hwmgr_backend_fini(hwmgr); +- } ++ data->pcie_gen_cap = adev->pm.pcie_gen_mask; ++ if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) ++ data->pcie_spc_cap = 20; ++ else ++ data->pcie_spc_cap = 16; ++ data->pcie_lane_cap = adev->pm.pcie_mlw_mask; ++ ++ hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */ ++ /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */ ++ hwmgr->platform_descriptor.clockStep.engineClock = 500; ++ hwmgr->platform_descriptor.clockStep.memoryClock = 500; ++ smu7_thermal_parameter_init(hwmgr); + + result = smu7_update_edc_leakage_table(hwmgr); +- if (result) { +- smu7_hwmgr_backend_fini(hwmgr); +- return result; +- } ++ if (result) ++ goto fail; + + return 0; ++fail: ++ smu7_hwmgr_backend_fini(hwmgr); ++ return result; + } + + static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr) +@@ -3316,8 +3314,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, + const struct pp_power_state *current_ps) + { + struct amdgpu_device *adev = hwmgr->adev; +- struct smu7_power_state *smu7_ps = +- cast_phw_smu7_power_state(&request_ps->hardware); ++ struct smu7_power_state *smu7_ps; + uint32_t sclk; + uint32_t mclk; + struct PP_Clocks minimum_clocks = {0}; +@@ -3334,6 +3331,10 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, + uint32_t latency; + bool latency_allowed = false; + ++ smu7_ps = cast_phw_smu7_power_state(&request_ps->hardware); ++ if (!smu7_ps) ++ return -EINVAL; ++ + data->battery_state = (PP_StateUILabel_Battery == + request_ps->classification.ui_label); + data->mclk_ignore_signal = false; +diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c +index b015a601b385a..eb744401e0567 100644 +--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c ++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c +@@ -1065,16 +1065,18 @@ static int smu8_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, + struct pp_power_state *prequest_ps, + const struct pp_power_state *pcurrent_ps) + { +- struct smu8_power_state *smu8_ps = +- cast_smu8_power_state(&prequest_ps->hardware); +- +- const struct smu8_power_state *smu8_current_ps = +- cast_const_smu8_power_state(&pcurrent_ps->hardware); +- ++ struct smu8_power_state *smu8_ps; ++ const struct smu8_power_state *smu8_current_ps; + struct smu8_hwmgr *data = hwmgr->backend; + struct PP_Clocks clocks = {0, 0, 0, 0}; + bool force_high; + ++ smu8_ps = cast_smu8_power_state(&prequest_ps->hardware); ++ smu8_current_ps = cast_const_smu8_power_state(&pcurrent_ps->hardware); ++ ++ if (!smu8_ps || !smu8_current_ps) ++ return -EINVAL; ++ + smu8_ps->need_dfs_bypass = true; + + data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label); +diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c +index 6d6bc6a380b36..d43a530aba0e3 100644 +--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c ++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c +@@ -3259,8 +3259,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, + const struct pp_power_state *current_ps) + { + struct amdgpu_device *adev = hwmgr->adev; +- struct 
vega10_power_state *vega10_ps = +- cast_phw_vega10_power_state(&request_ps->hardware); ++ struct vega10_power_state *vega10_ps; + uint32_t sclk; + uint32_t mclk; + struct PP_Clocks minimum_clocks = {0}; +@@ -3278,6 +3277,10 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, + uint32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; + uint32_t latency; + ++ vega10_ps = cast_phw_vega10_power_state(&request_ps->hardware); ++ if (!vega10_ps) ++ return -EINVAL; ++ + data->battery_state = (PP_StateUILabel_Battery == + request_ps->classification.ui_label); + +@@ -3415,13 +3418,17 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co + const struct vega10_power_state *vega10_ps = + cast_const_phw_vega10_power_state(states->pnew_state); + struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table); +- uint32_t sclk = vega10_ps->performance_levels +- [vega10_ps->performance_level_count - 1].gfx_clock; + struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table); +- uint32_t mclk = vega10_ps->performance_levels +- [vega10_ps->performance_level_count - 1].mem_clock; ++ uint32_t sclk, mclk; + uint32_t i; + ++ if (vega10_ps == NULL) ++ return -EINVAL; ++ sclk = vega10_ps->performance_levels ++ [vega10_ps->performance_level_count - 1].gfx_clock; ++ mclk = vega10_ps->performance_levels ++ [vega10_ps->performance_level_count - 1].mem_clock; ++ + for (i = 0; i < sclk_table->count; i++) { + if (sclk == sclk_table->dpm_levels[i].value) + break; +@@ -3728,6 +3735,9 @@ static int vega10_generate_dpm_level_enable_mask( + cast_const_phw_vega10_power_state(states->pnew_state); + int i; + ++ if (vega10_ps == NULL) ++ return -EINVAL; ++ + PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps), + "Attempt to Trim DPM States Failed!", + return -1); +@@ -4995,6 +5005,8 @@ static int vega10_check_states_equal(struct pp_hwmgr *hwmgr, + + vega10_psa = cast_const_phw_vega10_power_state(pstate1); + vega10_psb = cast_const_phw_vega10_power_state(pstate2); ++ if (vega10_psa == NULL || vega10_psb == NULL) ++ return -EINVAL; + + /* If the two states don't even have the same number of performance levels + * they cannot be the same state. 
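+/*
+ * Illustrative sketch of the recurring pattern in the vega10 hunks above
+ * and below: cast_phw_vega10_power_state() and its const variant can
+ * return NULL when no hardware state is attached, so every caller now
+ * validates the cast before dereferencing. A hypothetical caller body:
+ *
+ *	struct vega10_power_state *vega10_ps;
+ *
+ *	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
+ *	if (vega10_ps == NULL)
+ *		return -EINVAL;	/* void helpers simply return instead */
+ *
+ *	max_level = vega10_ps->performance_level_count - 1;
+ */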
+@@ -5128,6 +5140,8 @@ static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) + return -EINVAL; + + vega10_ps = cast_phw_vega10_power_state(&ps->hardware); ++ if (vega10_ps == NULL) ++ return -EINVAL; + + vega10_ps->performance_levels + [vega10_ps->performance_level_count - 1].gfx_clock = +@@ -5179,6 +5193,8 @@ static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) + return -EINVAL; + + vega10_ps = cast_phw_vega10_power_state(&ps->hardware); ++ if (vega10_ps == NULL) ++ return -EINVAL; + + vega10_ps->performance_levels + [vega10_ps->performance_level_count - 1].mem_clock = +@@ -5420,6 +5436,9 @@ static void vega10_odn_update_power_state(struct pp_hwmgr *hwmgr) + return; + + vega10_ps = cast_phw_vega10_power_state(&ps->hardware); ++ if (vega10_ps == NULL) ++ return; ++ + max_level = vega10_ps->performance_level_count - 1; + + if (vega10_ps->performance_levels[max_level].gfx_clock != +@@ -5442,6 +5461,9 @@ static void vega10_odn_update_power_state(struct pp_hwmgr *hwmgr) + + ps = (struct pp_power_state *)((unsigned long)(hwmgr->ps) + hwmgr->ps_size * (hwmgr->num_ps - 1)); + vega10_ps = cast_phw_vega10_power_state(&ps->hardware); ++ if (vega10_ps == NULL) ++ return; ++ + max_level = vega10_ps->performance_level_count - 1; + + if (vega10_ps->performance_levels[max_level].gfx_clock != +@@ -5632,6 +5654,8 @@ static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_ + return -EINVAL; + + vega10_ps = cast_const_phw_vega10_power_state(state); ++ if (vega10_ps == NULL) ++ return -EINVAL; + + i = index > vega10_ps->performance_level_count - 1 ? + vega10_ps->performance_level_count - 1 : index; +diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +index 56e4c312cb7a9..1402e468aa90f 100644 +--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c ++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +@@ -1846,7 +1846,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu, + { + int ret = 0; + int index = 0; +- long workload; ++ long workload[1]; + struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); + + if (!skip_display_settings) { +@@ -1886,10 +1886,10 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu, + smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { + index = fls(smu->workload_mask); + index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; +- workload = smu->workload_setting[index]; ++ workload[0] = smu->workload_setting[index]; + +- if (smu->power_profile_mode != workload) +- smu_bump_power_profile_mode(smu, &workload, 0); ++ if (smu->power_profile_mode != workload[0]) ++ smu_bump_power_profile_mode(smu, workload, 0); + } + + return ret; +@@ -1939,7 +1939,7 @@ static int smu_switch_power_profile(void *handle, + { + struct smu_context *smu = handle; + struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); +- long workload; ++ long workload[1]; + uint32_t index; + + if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) +@@ -1952,17 +1952,17 @@ static int smu_switch_power_profile(void *handle, + smu->workload_mask &= ~(1 << smu->workload_prority[type]); + index = fls(smu->workload_mask); + index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; +- workload = smu->workload_setting[index]; ++ workload[0] = smu->workload_setting[index]; + } else { + smu->workload_mask |= (1 << smu->workload_prority[type]); + index = fls(smu->workload_mask); + index = index <= WORKLOAD_POLICY_MAX ? 
index - 1 : 0; +- workload = smu->workload_setting[index]; ++ workload[0] = smu->workload_setting[index]; + } + + if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && + smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) +- smu_bump_power_profile_mode(smu, &workload, 0); ++ smu_bump_power_profile_mode(smu, workload, 0); + + return 0; + } +diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c +index 6a4f20fccf841..7b0bc9704eacb 100644 +--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c ++++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c +@@ -1027,7 +1027,6 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp, + u32 status_reg; + u8 *buffer = msg->buffer; + unsigned int i; +- int num_transferred = 0; + int ret; + + /* Buffer size of AUX CH is 16 bytes */ +@@ -1079,7 +1078,6 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp, + reg = buffer[i]; + writel(reg, dp->reg_base + ANALOGIX_DP_BUF_DATA_0 + + 4 * i); +- num_transferred++; + } + } + +@@ -1127,7 +1125,6 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp, + reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0 + + 4 * i); + buffer[i] = (unsigned char)reg; +- num_transferred++; + } + } + +@@ -1144,7 +1141,7 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp, + (msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_READ) + msg->reply = DP_AUX_NATIVE_REPLY_ACK; + +- return num_transferred > 0 ? num_transferred : -EBUSY; ++ return msg->size; + + aux_error: + /* if aux err happen, reset aux */ +diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c +index 9023c0216a8a4..6ead31701e79e 100644 +--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c ++++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c +@@ -4024,6 +4024,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) + if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { + const struct drm_dp_connection_status_notify *conn_stat = + &up_req->msg.u.conn_stat; ++ bool handle_csn; + + drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", + conn_stat->port_number, +@@ -4032,6 +4033,16 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) + conn_stat->message_capability_status, + conn_stat->input_port, + conn_stat->peer_device_type); ++ ++ mutex_lock(&mgr->probe_lock); ++ handle_csn = mgr->mst_primary->link_address_sent; ++ mutex_unlock(&mgr->probe_lock); ++ ++ if (!handle_csn) { ++ drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. 
Skip it."); ++ kfree(up_req); ++ goto out; ++ } + } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { + const struct drm_dp_resource_status_notify *res_stat = + &up_req->msg.u.resource_stat; +diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c +index 0683a129b3628..51df7244de718 100644 +--- a/drivers/gpu/drm/drm_client_modeset.c ++++ b/drivers/gpu/drm/drm_client_modeset.c +@@ -869,6 +869,11 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, + + kfree(modeset->mode); + modeset->mode = drm_mode_duplicate(dev, mode); ++ if (!modeset->mode) { ++ ret = -ENOMEM; ++ break; ++ } ++ + drm_connector_get(connector); + modeset->connectors[modeset->num_connectors++] = connector; + modeset->x = offset->x; +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c +index 310654542b42c..a59c17ec7fa36 100644 +--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c ++++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c +@@ -290,6 +290,41 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf) + return i915_error_to_vmf_fault(err); + } + ++static void set_address_limits(struct vm_area_struct *area, ++ struct i915_vma *vma, ++ unsigned long obj_offset, ++ unsigned long *start_vaddr, ++ unsigned long *end_vaddr) ++{ ++ unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */ ++ long start, end; /* memory boundaries */ ++ ++ /* ++ * Let's move into the ">> PAGE_SHIFT" ++ * domain to be sure not to lose bits ++ */ ++ vm_start = area->vm_start >> PAGE_SHIFT; ++ vm_end = area->vm_end >> PAGE_SHIFT; ++ vma_size = vma->size >> PAGE_SHIFT; ++ ++ /* ++ * Calculate the memory boundaries by considering the offset ++ * provided by the user during memory mapping and the offset ++ * provided for the partial mapping. 
++ */ ++ start = vm_start; ++ start -= obj_offset; ++ start += vma->gtt_view.partial.offset; ++ end = start + vma_size; ++ ++ start = max_t(long, start, vm_start); ++ end = min_t(long, end, vm_end); ++ ++ /* Let's move back into the "<< PAGE_SHIFT" domain */ ++ *start_vaddr = (unsigned long)start << PAGE_SHIFT; ++ *end_vaddr = (unsigned long)end << PAGE_SHIFT; ++} ++ + static vm_fault_t vm_fault_gtt(struct vm_fault *vmf) + { + #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT) +@@ -302,14 +337,18 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf) + struct i915_ggtt *ggtt = to_gt(i915)->ggtt; + bool write = area->vm_flags & VM_WRITE; + struct i915_gem_ww_ctx ww; ++ unsigned long obj_offset; ++ unsigned long start, end; /* memory boundaries */ + intel_wakeref_t wakeref; + struct i915_vma *vma; + pgoff_t page_offset; ++ unsigned long pfn; + int srcu; + int ret; + +- /* We don't use vmf->pgoff since that has the fake offset */ ++ obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node); + page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT; ++ page_offset += obj_offset; + + trace_i915_gem_object_fault(obj, page_offset, true, write); + +@@ -402,12 +441,14 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf) + if (ret) + goto err_unpin; + ++ set_address_limits(area, vma, obj_offset, &start, &end); ++ ++ pfn = (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT; ++ pfn += (start - area->vm_start) >> PAGE_SHIFT; ++ pfn += obj_offset - vma->gtt_view.partial.offset; ++ + /* Finally, remap it using the new GTT offset */ +- ret = remap_io_mapping(area, +- area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT), +- (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT, +- min_t(u64, vma->size, area->vm_end - area->vm_start), +- &ggtt->iomap); ++ ret = remap_io_mapping(area, start, pfn, end - start, &ggtt->iomap); + if (ret) + goto err_fence; + +@@ -1088,6 +1129,8 @@ int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma + mmo = mmap_offset_attach(obj, mmap_type, NULL); + if (IS_ERR(mmo)) + return PTR_ERR(mmo); ++ ++ vma->vm_pgoff += drm_vma_node_start(&mmo->vma_node); + } + + /* +diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c +index 10fd9154cc465..8c9b656eeb59d 100644 +--- a/drivers/gpu/drm/lima/lima_drv.c ++++ b/drivers/gpu/drm/lima/lima_drv.c +@@ -486,3 +486,4 @@ module_platform_driver(lima_platform_driver); + MODULE_AUTHOR("Lima Project Developers"); + MODULE_DESCRIPTION("Lima DRM Driver"); + MODULE_LICENSE("GPL v2"); ++MODULE_SOFTDEP("pre: governor_simpleondemand"); +diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c +index 0c48bdf3e7f80..f5c5d06d0d4bb 100644 +--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c ++++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c +@@ -31,6 +31,8 @@ + #include + #include + ++#include ++ + #include "mgag200_drv.h" + + static int mga_i2c_read_gpio(struct mga_device *mdev) +@@ -86,7 +88,7 @@ static int mga_gpio_getscl(void *data) + return (mga_i2c_read_gpio(mdev) & i2c->clock) ? 
1 : 0; + } + +-static void mgag200_i2c_release(void *res) ++static void mgag200_i2c_release(struct drm_device *dev, void *res) + { + struct mga_i2c_chan *i2c = res; + +@@ -115,7 +117,7 @@ int mgag200_i2c_init(struct mga_device *mdev, struct mga_i2c_chan *i2c) + i2c->adapter.algo_data = &i2c->bit; + + i2c->bit.udelay = 10; +- i2c->bit.timeout = 2; ++ i2c->bit.timeout = usecs_to_jiffies(2200); + i2c->bit.data = i2c; + i2c->bit.setsda = mga_gpio_setsda; + i2c->bit.setscl = mga_gpio_setscl; +@@ -126,5 +128,5 @@ int mgag200_i2c_init(struct mga_device *mdev, struct mga_i2c_chan *i2c) + if (ret) + return ret; + +- return devm_add_action_or_reset(dev->dev, mgag200_i2c_release, i2c); ++ return drmm_add_action_or_reset(dev, mgag200_i2c_release, i2c); + } +diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c +index 2bbcdc649e862..3d41e590d4712 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c ++++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c +@@ -1320,6 +1320,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job) + + drm_gpuva_for_each_op(va_op, op->ops) { + struct drm_gem_object *obj = op_gem_obj(va_op); ++ struct nouveau_bo *nvbo; + + if (unlikely(!obj)) + continue; +@@ -1330,8 +1331,9 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job) + if (unlikely(va_op->op == DRM_GPUVA_OP_UNMAP)) + continue; + +- ret = nouveau_bo_validate(nouveau_gem_object(obj), +- true, false); ++ nvbo = nouveau_gem_object(obj); ++ nouveau_bo_placement_set(nvbo, nvbo->valid_domains, 0); ++ ret = nouveau_bo_validate(nvbo, true, false); + if (ret) { + op = list_last_op(&bind_job->ops); + goto unwind; +diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h +index 844f0490bf31f..ce8832916704f 100644 +--- a/drivers/gpu/drm/radeon/pptable.h ++++ b/drivers/gpu/drm/radeon/pptable.h +@@ -439,7 +439,7 @@ typedef struct _StateArray{ + //how many states we have + UCHAR ucNumEntries; + +- ATOM_PPLIB_STATE_V2 states[] __counted_by(ucNumEntries); ++ ATOM_PPLIB_STATE_V2 states[] /* __counted_by(ucNumEntries) */; + }StateArray; + + +diff --git a/drivers/hwmon/corsair-psu.c b/drivers/hwmon/corsair-psu.c +index 2c7c92272fe39..f8f22b8a67cdf 100644 +--- a/drivers/hwmon/corsair-psu.c ++++ b/drivers/hwmon/corsair-psu.c +@@ -875,15 +875,16 @@ static const struct hid_device_id corsairpsu_idtable[] = { + { HID_USB_DEVICE(0x1b1c, 0x1c04) }, /* Corsair HX650i */ + { HID_USB_DEVICE(0x1b1c, 0x1c05) }, /* Corsair HX750i */ + { HID_USB_DEVICE(0x1b1c, 0x1c06) }, /* Corsair HX850i */ +- { HID_USB_DEVICE(0x1b1c, 0x1c07) }, /* Corsair HX1000i Series 2022 */ +- { HID_USB_DEVICE(0x1b1c, 0x1c08) }, /* Corsair HX1200i */ ++ { HID_USB_DEVICE(0x1b1c, 0x1c07) }, /* Corsair HX1000i Legacy */ ++ { HID_USB_DEVICE(0x1b1c, 0x1c08) }, /* Corsair HX1200i Legacy */ + { HID_USB_DEVICE(0x1b1c, 0x1c09) }, /* Corsair RM550i */ + { HID_USB_DEVICE(0x1b1c, 0x1c0a) }, /* Corsair RM650i */ + { HID_USB_DEVICE(0x1b1c, 0x1c0b) }, /* Corsair RM750i */ + { HID_USB_DEVICE(0x1b1c, 0x1c0c) }, /* Corsair RM850i */ + { HID_USB_DEVICE(0x1b1c, 0x1c0d) }, /* Corsair RM1000i */ + { HID_USB_DEVICE(0x1b1c, 0x1c1e) }, /* Corsair HX1000i Series 2023 */ +- { HID_USB_DEVICE(0x1b1c, 0x1c1f) }, /* Corsair HX1500i Series 2022 and 2023 */ ++ { HID_USB_DEVICE(0x1b1c, 0x1c1f) }, /* Corsair HX1500i Legacy and Series 2023 */ ++ { HID_USB_DEVICE(0x1b1c, 0x1c23) }, /* Corsair HX1200i Series 2023 */ + { }, + }; + MODULE_DEVICE_TABLE(hid, corsairpsu_idtable); +diff --git a/drivers/i2c/busses/i2c-qcom-geni.c 
b/drivers/i2c/busses/i2c-qcom-geni.c +index 5cc32a465f12e..b17411e97be68 100644 +--- a/drivers/i2c/busses/i2c-qcom-geni.c ++++ b/drivers/i2c/busses/i2c-qcom-geni.c +@@ -991,8 +991,11 @@ static int __maybe_unused geni_i2c_runtime_resume(struct device *dev) + return ret; + + ret = geni_se_resources_on(&gi2c->se); +- if (ret) ++ if (ret) { ++ clk_disable_unprepare(gi2c->core_clk); ++ geni_icc_disable(&gi2c->se); + return ret; ++ } + + enable_irq(gi2c->irq); + gi2c->suspended = 0; +diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c +index 138c3f5e0093a..6520e09743912 100644 +--- a/drivers/i2c/i2c-smbus.c ++++ b/drivers/i2c/i2c-smbus.c +@@ -34,6 +34,7 @@ static int smbus_do_alert(struct device *dev, void *addrp) + struct i2c_client *client = i2c_verify_client(dev); + struct alert_data *data = addrp; + struct i2c_driver *driver; ++ int ret; + + if (!client || client->addr != data->addr) + return 0; +@@ -47,16 +48,47 @@ static int smbus_do_alert(struct device *dev, void *addrp) + device_lock(dev); + if (client->dev.driver) { + driver = to_i2c_driver(client->dev.driver); +- if (driver->alert) ++ if (driver->alert) { ++ /* Stop iterating after we find the device */ + driver->alert(client, data->type, data->data); +- else ++ ret = -EBUSY; ++ } else { + dev_warn(&client->dev, "no driver alert()!\n"); +- } else ++ ret = -EOPNOTSUPP; ++ } ++ } else { + dev_dbg(&client->dev, "alert with no driver\n"); ++ ret = -ENODEV; ++ } ++ device_unlock(dev); ++ ++ return ret; ++} ++ ++/* Same as above, but call back all drivers with alert handler */ ++ ++static int smbus_do_alert_force(struct device *dev, void *addrp) ++{ ++ struct i2c_client *client = i2c_verify_client(dev); ++ struct alert_data *data = addrp; ++ struct i2c_driver *driver; ++ ++ if (!client || (client->flags & I2C_CLIENT_TEN)) ++ return 0; ++ ++ /* ++ * Drivers should either disable alerts, or provide at least ++ * a minimal handler. Lock so the driver won't change. ++ */ ++ device_lock(dev); ++ if (client->dev.driver) { ++ driver = to_i2c_driver(client->dev.driver); ++ if (driver->alert) ++ driver->alert(client, data->type, data->data); ++ } + device_unlock(dev); + +- /* Stop iterating after we find the device */ +- return -EBUSY; ++ return 0; + } + + /* +@@ -67,6 +99,7 @@ static irqreturn_t smbus_alert(int irq, void *d) + { + struct i2c_smbus_alert *alert = d; + struct i2c_client *ara; ++ unsigned short prev_addr = I2C_CLIENT_END; /* Not a valid address */ + + ara = alert->ara; + +@@ -94,8 +127,25 @@ static irqreturn_t smbus_alert(int irq, void *d) + data.addr, data.data); + + /* Notify driver for the device which issued the alert */ +- device_for_each_child(&ara->adapter->dev, &data, +- smbus_do_alert); ++ status = device_for_each_child(&ara->adapter->dev, &data, ++ smbus_do_alert); ++ /* ++ * If we read the same address more than once, and the alert ++ * was not handled by a driver, it won't do any good to repeat ++ * the loop because it will never terminate. Try again, this ++ * time calling the alert handlers of all devices connected to ++ * the bus, and abort the loop afterwards. If this helps, we ++ * are all set. If it doesn't, there is nothing else we can do, ++ * so we might as well abort the loop. ++ * Note: This assumes that a driver with alert handler handles ++ * the alert properly and clears it if necessary. 
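++ * (smbus_do_alert() returns -EBUSY exactly when a driver's alert
++ * handler was called, so any status other than -EBUSY here means
++ * the alert for this address went unhandled.)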
++ */ ++ if (data.addr == prev_addr && status != -EBUSY) { ++ device_for_each_child(&ara->adapter->dev, &data, ++ smbus_do_alert_force); ++ break; ++ } ++ prev_addr = data.addr; + } + + return IRQ_HANDLED; +diff --git a/drivers/irqchip/irq-loongarch-cpu.c b/drivers/irqchip/irq-loongarch-cpu.c +index 9d8f2c4060431..b35903a06902f 100644 +--- a/drivers/irqchip/irq-loongarch-cpu.c ++++ b/drivers/irqchip/irq-loongarch-cpu.c +@@ -18,11 +18,13 @@ struct fwnode_handle *cpuintc_handle; + + static u32 lpic_gsi_to_irq(u32 gsi) + { ++ int irq = 0; ++ + /* Only pch irqdomain transferring is required for LoongArch. */ + if (gsi >= GSI_MIN_PCH_IRQ && gsi <= GSI_MAX_PCH_IRQ) +- return acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH); ++ irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH); + +- return 0; ++ return (irq > 0) ? irq : 0; + } + + static struct fwnode_handle *lpic_get_gsi_domain_id(u32 gsi) +diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c +index 58881d3139792..244a8d489cac6 100644 +--- a/drivers/irqchip/irq-mbigen.c ++++ b/drivers/irqchip/irq-mbigen.c +@@ -64,6 +64,20 @@ struct mbigen_device { + void __iomem *base; + }; + ++static inline unsigned int get_mbigen_node_offset(unsigned int nid) ++{ ++ unsigned int offset = nid * MBIGEN_NODE_OFFSET; ++ ++ /* ++ * To avoid touched clear register in unexpected way, we need to directly ++ * skip clear register when access to more than 10 mbigen nodes. ++ */ ++ if (nid >= (REG_MBIGEN_CLEAR_OFFSET / MBIGEN_NODE_OFFSET)) ++ offset += MBIGEN_NODE_OFFSET; ++ ++ return offset; ++} ++ + static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq) + { + unsigned int nid, pin; +@@ -72,8 +86,7 @@ static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq) + nid = hwirq / IRQS_PER_MBIGEN_NODE + 1; + pin = hwirq % IRQS_PER_MBIGEN_NODE; + +- return pin * 4 + nid * MBIGEN_NODE_OFFSET +- + REG_MBIGEN_VEC_OFFSET; ++ return pin * 4 + get_mbigen_node_offset(nid) + REG_MBIGEN_VEC_OFFSET; + } + + static inline void get_mbigen_type_reg(irq_hw_number_t hwirq, +@@ -88,8 +101,7 @@ static inline void get_mbigen_type_reg(irq_hw_number_t hwirq, + *mask = 1 << (irq_ofst % 32); + ofst = irq_ofst / 32 * 4; + +- *addr = ofst + nid * MBIGEN_NODE_OFFSET +- + REG_MBIGEN_TYPE_OFFSET; ++ *addr = ofst + get_mbigen_node_offset(nid) + REG_MBIGEN_TYPE_OFFSET; + } + + static inline void get_mbigen_clear_reg(irq_hw_number_t hwirq, +diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c +index f88df39f41291..471e04eaf3230 100644 +--- a/drivers/irqchip/irq-meson-gpio.c ++++ b/drivers/irqchip/irq-meson-gpio.c +@@ -173,7 +173,7 @@ struct meson_gpio_irq_controller { + void __iomem *base; + u32 channel_irqs[MAX_NUM_CHANNEL]; + DECLARE_BITMAP(channel_map, MAX_NUM_CHANNEL); +- spinlock_t lock; ++ raw_spinlock_t lock; + }; + + static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl, +@@ -182,14 +182,14 @@ static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl, + unsigned long flags; + u32 tmp; + +- spin_lock_irqsave(&ctl->lock, flags); ++ raw_spin_lock_irqsave(&ctl->lock, flags); + + tmp = readl_relaxed(ctl->base + reg); + tmp &= ~mask; + tmp |= val; + writel_relaxed(tmp, ctl->base + reg); + +- spin_unlock_irqrestore(&ctl->lock, flags); ++ raw_spin_unlock_irqrestore(&ctl->lock, flags); + } + + static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl) +@@ -239,12 +239,12 @@ meson_gpio_irq_request_channel(struct 
meson_gpio_irq_controller *ctl, + unsigned long flags; + unsigned int idx; + +- spin_lock_irqsave(&ctl->lock, flags); ++ raw_spin_lock_irqsave(&ctl->lock, flags); + + /* Find a free channel */ + idx = find_first_zero_bit(ctl->channel_map, ctl->params->nr_channels); + if (idx >= ctl->params->nr_channels) { +- spin_unlock_irqrestore(&ctl->lock, flags); ++ raw_spin_unlock_irqrestore(&ctl->lock, flags); + pr_err("No channel available\n"); + return -ENOSPC; + } +@@ -252,7 +252,7 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl, + /* Mark the channel as used */ + set_bit(idx, ctl->channel_map); + +- spin_unlock_irqrestore(&ctl->lock, flags); ++ raw_spin_unlock_irqrestore(&ctl->lock, flags); + + /* + * Setup the mux of the channel to route the signal of the pad +@@ -562,7 +562,7 @@ static int meson_gpio_irq_of_init(struct device_node *node, struct device_node * + if (!ctl) + return -ENOMEM; + +- spin_lock_init(&ctl->lock); ++ raw_spin_lock_init(&ctl->lock); + + ctl->base = of_iomap(node, 0); + if (!ctl->base) { +diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c +index 238d3d3449496..7e08714d507f4 100644 +--- a/drivers/irqchip/irq-xilinx-intc.c ++++ b/drivers/irqchip/irq-xilinx-intc.c +@@ -189,7 +189,7 @@ static int __init xilinx_intc_of_init(struct device_node *intc, + irqc->intr_mask = 0; + } + +- if (irqc->intr_mask >> irqc->nr_irq) ++ if ((u64)irqc->intr_mask >> irqc->nr_irq) + pr_warn("irq-xilinx: mismatch in kind-of-intr param\n"); + + pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n", +diff --git a/drivers/md/md.c b/drivers/md/md.c +index b5dea664f946d..35b003b83ef1b 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -456,7 +456,6 @@ void mddev_suspend(struct mddev *mddev) + clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags); + wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags)); + +- del_timer_sync(&mddev->safemode_timer); + /* restrict memory reclaim I/O during raid array is suspend */ + mddev->noio_flag = memalloc_noio_save(); + } +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c +index 1507540a9cb4e..2c7f11e576673 100644 +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -6326,7 +6326,9 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk + safepos = conf->reshape_safe; + sector_div(safepos, data_disks); + if (mddev->reshape_backwards) { +- BUG_ON(writepos < reshape_sectors); ++ if (WARN_ON(writepos < reshape_sectors)) ++ return MaxSector; ++ + writepos -= reshape_sectors; + readpos += reshape_sectors; + safepos += reshape_sectors; +@@ -6344,14 +6346,18 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk + * to set 'stripe_addr' which is where we will write to. 
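+ * (The hunks here also demote BUG_ON() sanity checks to WARN_ON()
+ * plus "return MaxSector", so an inconsistent reshape position now
+ * aborts the reshape request instead of crashing the kernel.)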
+ */ + if (mddev->reshape_backwards) { +- BUG_ON(conf->reshape_progress == 0); ++ if (WARN_ON(conf->reshape_progress == 0)) ++ return MaxSector; ++ + stripe_addr = writepos; +- BUG_ON((mddev->dev_sectors & +- ~((sector_t)reshape_sectors - 1)) +- - reshape_sectors - stripe_addr +- != sector_nr); ++ if (WARN_ON((mddev->dev_sectors & ++ ~((sector_t)reshape_sectors - 1)) - ++ reshape_sectors - stripe_addr != sector_nr)) ++ return MaxSector; + } else { +- BUG_ON(writepos != sector_nr + reshape_sectors); ++ if (WARN_ON(writepos != sector_nr + reshape_sectors)) ++ return MaxSector; ++ + stripe_addr = sector_nr; + } + +diff --git a/drivers/media/platform/amphion/vdec.c b/drivers/media/platform/amphion/vdec.c +index 133d77d1ea0c3..4f438eaa7d385 100644 +--- a/drivers/media/platform/amphion/vdec.c ++++ b/drivers/media/platform/amphion/vdec.c +@@ -195,7 +195,6 @@ static int vdec_op_s_ctrl(struct v4l2_ctrl *ctrl) + struct vdec_t *vdec = inst->priv; + int ret = 0; + +- vpu_inst_lock(inst); + switch (ctrl->id) { + case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE: + vdec->params.display_delay_enable = ctrl->val; +@@ -207,7 +206,6 @@ static int vdec_op_s_ctrl(struct v4l2_ctrl *ctrl) + ret = -EINVAL; + break; + } +- vpu_inst_unlock(inst); + + return ret; + } +diff --git a/drivers/media/platform/amphion/venc.c b/drivers/media/platform/amphion/venc.c +index 4eb57d793a9c0..16ed4d21519cd 100644 +--- a/drivers/media/platform/amphion/venc.c ++++ b/drivers/media/platform/amphion/venc.c +@@ -518,7 +518,6 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl) + struct venc_t *venc = inst->priv; + int ret = 0; + +- vpu_inst_lock(inst); + switch (ctrl->id) { + case V4L2_CID_MPEG_VIDEO_H264_PROFILE: + venc->params.profile = ctrl->val; +@@ -579,7 +578,6 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl) + ret = -EINVAL; + break; + } +- vpu_inst_unlock(inst); + + return ret; + } +diff --git a/drivers/media/tuners/xc2028.c b/drivers/media/tuners/xc2028.c +index 5a967edceca93..352b8a3679b72 100644 +--- a/drivers/media/tuners/xc2028.c ++++ b/drivers/media/tuners/xc2028.c +@@ -1361,9 +1361,16 @@ static void load_firmware_cb(const struct firmware *fw, + void *context) + { + struct dvb_frontend *fe = context; +- struct xc2028_data *priv = fe->tuner_priv; ++ struct xc2028_data *priv; + int rc; + ++ if (!fe) { ++ pr_warn("xc2028: No frontend in %s\n", __func__); ++ return; ++ } ++ ++ priv = fe->tuner_priv; ++ + tuner_dbg("request_firmware_nowait(): %s\n", fw ? "OK" : "error"); + if (!fw) { + tuner_err("Could not load firmware %s.\n", priv->fname); +diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c +index 5eef560bc8cd8..91c350b254126 100644 +--- a/drivers/media/usb/uvc/uvc_video.c ++++ b/drivers/media/usb/uvc/uvc_video.c +@@ -214,13 +214,13 @@ static void uvc_fixup_video_ctrl(struct uvc_streaming *stream, + * Compute a bandwidth estimation by multiplying the frame + * size by the number of video frames per second, divide the + * result by the number of USB frames (or micro-frames for +- * high-speed devices) per second and add the UVC header size +- * (assumed to be 12 bytes long). ++ * high- and super-speed devices) per second and add the UVC ++ * header size (assumed to be 12 bytes long). 
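++ * Worked example (illustrative): a 1920x1080 YUY2 stream (16 bpp)
++ * at 30 fps (interval == 333333 in 100 ns units) gives
++ * 1920 * 1080 / 8 * 16 == 4147200 bytes per frame, * 31 / 1000 ==
++ * 128563, / 8 (high speed) == 16070, + 12 -> 16082 bytes per
++ * microframe.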
+ */ + bandwidth = frame->wWidth * frame->wHeight / 8 * format->bpp; + bandwidth *= 10000000 / interval + 1; + bandwidth /= 1000; +- if (stream->dev->udev->speed == USB_SPEED_HIGH) ++ if (stream->dev->udev->speed >= USB_SPEED_HIGH) + bandwidth /= 8; + bandwidth += 12; + +@@ -478,6 +478,7 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf, + ktime_t time; + u16 host_sof; + u16 dev_sof; ++ u32 dev_stc; + + switch (data[1] & (UVC_STREAM_PTS | UVC_STREAM_SCR)) { + case UVC_STREAM_PTS | UVC_STREAM_SCR: +@@ -526,6 +527,34 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf, + if (dev_sof == stream->clock.last_sof) + return; + ++ dev_stc = get_unaligned_le32(&data[header_size - 6]); ++ ++ /* ++ * STC (Source Time Clock) is the clock used by the camera. The UVC 1.5 ++ * standard states that it "must be captured when the first video data ++ * of a video frame is put on the USB bus". This is generally understood ++ * as requiring devices to clear the payload header's SCR bit before ++ * the first packet containing video data. ++ * ++ * Most vendors follow that interpretation, but some (namely SunplusIT ++ * on some devices) always set the `UVC_STREAM_SCR` bit, fill the SCR ++ * field with 0's,and expect that the driver only processes the SCR if ++ * there is data in the packet. ++ * ++ * Ignore all the hardware timestamp information if we haven't received ++ * any data for this frame yet, the packet contains no data, and both ++ * STC and SOF are zero. This heuristics should be safe on compliant ++ * devices. This should be safe with compliant devices, as in the very ++ * unlikely case where a UVC 1.1 device would send timing information ++ * only before the first packet containing data, and both STC and SOF ++ * happen to be zero for a particular frame, we would only miss one ++ * clock sample from many and the clock recovery algorithm wouldn't ++ * suffer from this condition. 
++ */ ++ if (buf && buf->bytesused == 0 && len == header_size && ++ dev_stc == 0 && dev_sof == 0) ++ return; ++ + stream->clock.last_sof = dev_sof; + + host_sof = usb_get_current_frame_number(stream->dev->udev); +@@ -575,7 +604,7 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf, + spin_lock_irqsave(&stream->clock.lock, flags); + + sample = &stream->clock.samples[stream->clock.head]; +- sample->dev_stc = get_unaligned_le32(&data[header_size - 6]); ++ sample->dev_stc = dev_stc; + sample->dev_sof = dev_sof; + sample->host_sof = host_sof; + sample->host_time = time; +diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c +index bfe4caa0c99d4..4cb79a4f24612 100644 +--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c ++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c +@@ -485,6 +485,8 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv) + clear_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags); + } + ++ tx_ring->obj_num_shift_to_u8 = BITS_PER_TYPE(tx_ring->obj_num) - ++ ilog2(tx_ring->obj_num); + tx_ring->obj_size = tx_obj_size; + + rem = priv->rx_obj_num; +diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c +index e5bd57b65aafe..5b0c7890d4b44 100644 +--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c ++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c +@@ -2,7 +2,7 @@ + // + // mcp251xfd - Microchip MCP251xFD Family CAN controller driver + // +-// Copyright (c) 2019, 2020, 2021 Pengutronix, ++// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix, + // Marc Kleine-Budde + // + // Based on: +@@ -16,6 +16,11 @@ + + #include "mcp251xfd.h" + ++static inline bool mcp251xfd_tx_fifo_sta_full(u32 fifo_sta) ++{ ++ return !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF); ++} ++ + static inline int + mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv, + u8 *tef_tail) +@@ -55,56 +60,39 @@ static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv) + return 0; + } + +-static int +-mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq) +-{ +- const struct mcp251xfd_tx_ring *tx_ring = priv->tx; +- u32 tef_sta; +- int err; +- +- err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta); +- if (err) +- return err; +- +- if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) { +- netdev_err(priv->ndev, +- "Transmit Event FIFO buffer overflow.\n"); +- return -ENOBUFS; +- } +- +- netdev_info(priv->ndev, +- "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x).\n", +- tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ? +- "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ? +- "not empty" : "empty", +- seq, priv->tef->tail, priv->tef->head, tx_ring->head); +- +- /* The Sequence Number in the TEF doesn't match our tef_tail. */ +- return -EAGAIN; +-} +- + static int + mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv, + const struct mcp251xfd_hw_tef_obj *hw_tef_obj, + unsigned int *frame_len_ptr) + { + struct net_device_stats *stats = &priv->ndev->stats; ++ u32 seq, tef_tail_masked, tef_tail; + struct sk_buff *skb; +- u32 seq, seq_masked, tef_tail_masked, tef_tail; + +- seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, ++ /* Use the MCP2517FD mask on the MCP2518FD, too. We only ++ * compare 7 bits, this is enough to detect old TEF objects. ++ */ ++ seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK, + hw_tef_obj->flags); +- +- /* Use the MCP2517FD mask on the MCP2518FD, too. 
We only +- * compare 7 bits, this should be enough to detect +- * net-yet-completed, i.e. old TEF objects. +- */ +- seq_masked = seq & +- field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK); + tef_tail_masked = priv->tef->tail & + field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK); +- if (seq_masked != tef_tail_masked) +- return mcp251xfd_handle_tefif_recover(priv, seq); ++ ++ /* According to mcp2518fd erratum DS80000789E 6. the FIFOCI ++ * bits of a FIFOSTA register, here the TX FIFO tail index ++ * might be corrupted and we might process past the TEF FIFO's ++ * head into old CAN frames. ++ * ++ * Compare the sequence number of the currently processed CAN ++ * frame with the expected sequence number. Abort with ++ * -EBADMSG if an old CAN frame is detected. ++ */ ++ if (seq != tef_tail_masked) { ++ netdev_dbg(priv->ndev, "%s: chip=0x%02x ring=0x%02x\n", __func__, ++ seq, tef_tail_masked); ++ stats->tx_fifo_errors++; ++ ++ return -EBADMSG; ++ } + + tef_tail = mcp251xfd_get_tef_tail(priv); + skb = priv->can.echo_skb[tef_tail]; +@@ -120,28 +108,44 @@ mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv, + return 0; + } + +-static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv) ++static int ++mcp251xfd_get_tef_len(struct mcp251xfd_priv *priv, u8 *len_p) + { + const struct mcp251xfd_tx_ring *tx_ring = priv->tx; +- unsigned int new_head; +- u8 chip_tx_tail; ++ const u8 shift = tx_ring->obj_num_shift_to_u8; ++ u8 chip_tx_tail, tail, len; ++ u32 fifo_sta; + int err; + +- err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail); ++ err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(priv->tx->fifo_nr), ++ &fifo_sta); + if (err) + return err; + +- /* chip_tx_tail, is the next TX-Object send by the HW. +- * The new TEF head must be >= the old head, ... ++ if (mcp251xfd_tx_fifo_sta_full(fifo_sta)) { ++ *len_p = tx_ring->obj_num; ++ return 0; ++ } ++ ++ chip_tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta); ++ ++ err = mcp251xfd_check_tef_tail(priv); ++ if (err) ++ return err; ++ tail = mcp251xfd_get_tef_tail(priv); ++ ++ /* First shift to full u8. The subtraction works on signed ++ * values, that keeps the difference steady around the u8 ++ * overflow. The right shift acts on len, which is an u8. + */ +- new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail; +- if (new_head <= priv->tef->head) +- new_head += tx_ring->obj_num; ++ BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(chip_tx_tail)); ++ BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(tail)); ++ BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(len)); + +- /* ... but it cannot exceed the TX head. 
*/ +- priv->tef->head = min(new_head, tx_ring->head); ++ len = (chip_tx_tail << shift) - (tail << shift); ++ *len_p = len >> shift; + +- return mcp251xfd_check_tef_tail(priv); ++ return 0; + } + + static inline int +@@ -182,13 +186,12 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv) + u8 tef_tail, len, l; + int err, i; + +- err = mcp251xfd_tef_ring_update(priv); ++ err = mcp251xfd_get_tef_len(priv, &len); + if (err) + return err; + + tef_tail = mcp251xfd_get_tef_tail(priv); +- len = mcp251xfd_get_tef_len(priv); +- l = mcp251xfd_get_tef_linear_len(priv); ++ l = mcp251xfd_get_tef_linear_len(priv, len); + err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l); + if (err) + return err; +@@ -203,12 +206,12 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv) + unsigned int frame_len = 0; + + err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len); +- /* -EAGAIN means the Sequence Number in the TEF +- * doesn't match our tef_tail. This can happen if we +- * read the TEF objects too early. Leave loop let the +- * interrupt handler call us again. ++ /* -EBADMSG means we're affected by mcp2518fd erratum ++ * DS80000789E 6., i.e. the Sequence Number in the TEF ++ * doesn't match our tef_tail. Don't process any ++ * further and mark processed frames as good. + */ +- if (err == -EAGAIN) ++ if (err == -EBADMSG) + goto out_netif_wake_queue; + if (err) + return err; +@@ -223,6 +226,8 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv) + struct mcp251xfd_tx_ring *tx_ring = priv->tx; + int offset; + ++ ring->head += len; ++ + /* Increment the TEF FIFO tail pointer 'len' times in + * a single SPI message. + * +diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h +index b35bfebd23f29..4628bf847bc9b 100644 +--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h ++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h +@@ -524,6 +524,7 @@ struct mcp251xfd_tef_ring { + + /* u8 obj_num equals tx_ring->obj_num */ + /* u8 obj_size equals sizeof(struct mcp251xfd_hw_tef_obj) */ ++ /* u8 obj_num_shift_to_u8 equals tx_ring->obj_num_shift_to_u8 */ + + union mcp251xfd_write_reg_buf irq_enable_buf; + struct spi_transfer irq_enable_xfer; +@@ -542,6 +543,7 @@ struct mcp251xfd_tx_ring { + u8 nr; + u8 fifo_nr; + u8 obj_num; ++ u8 obj_num_shift_to_u8; + u8 obj_size; + + struct mcp251xfd_tx_obj obj[MCP251XFD_TX_OBJ_NUM_MAX]; +@@ -861,17 +863,8 @@ static inline u8 mcp251xfd_get_tef_tail(const struct mcp251xfd_priv *priv) + return priv->tef->tail & (priv->tx->obj_num - 1); + } + +-static inline u8 mcp251xfd_get_tef_len(const struct mcp251xfd_priv *priv) ++static inline u8 mcp251xfd_get_tef_linear_len(const struct mcp251xfd_priv *priv, u8 len) + { +- return priv->tef->head - priv->tef->tail; +-} +- +-static inline u8 mcp251xfd_get_tef_linear_len(const struct mcp251xfd_priv *priv) +-{ +- u8 len; +- +- len = mcp251xfd_get_tef_len(priv); +- + return min_t(u8, len, priv->tx->obj_num - mcp251xfd_get_tef_tail(priv)); + } + +diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c +index cd1f240c90f39..257df16768750 100644 +--- a/drivers/net/dsa/bcm_sf2.c ++++ b/drivers/net/dsa/bcm_sf2.c +@@ -678,8 +678,10 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) + of_remove_property(child, prop); + + phydev = of_phy_find_device(child); +- if (phydev) ++ if (phydev) { + phy_device_remove(phydev); ++ phy_device_free(phydev); ++ } + } + + err = mdiobus_register(priv->slave_mii_bus); +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c 
b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +index 1248792d7fd4d..0715ea5bf13ed 100644 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +@@ -42,19 +42,15 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + +- if (dev->phydev) { ++ if (dev->phydev) + phy_ethtool_get_wol(dev->phydev, wol); +- if (wol->supported) +- return; +- } + +- if (!device_can_wakeup(kdev)) { +- wol->supported = 0; +- wol->wolopts = 0; ++ /* MAC is not wake-up capable, return what the PHY does */ ++ if (!device_can_wakeup(kdev)) + return; +- } + +- wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; ++ /* Overlay MAC capabilities with that of the PHY queried before */ ++ wol->supported |= WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; + wol->wolopts = priv->wolopts; + memset(wol->sopass, 0, sizeof(wol->sopass)); + +diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c +index e32f6724f5681..2e4f3e1782a25 100644 +--- a/drivers/net/ethernet/freescale/fec_ptp.c ++++ b/drivers/net/ethernet/freescale/fec_ptp.c +@@ -775,6 +775,9 @@ void fec_ptp_stop(struct platform_device *pdev) + struct net_device *ndev = platform_get_drvdata(pdev); + struct fec_enet_private *fep = netdev_priv(ndev); + ++ if (fep->pps_enable) ++ fec_ptp_enable_pps(fep, 0); ++ + cancel_delayed_work_sync(&fep->time_keep); + hrtimer_cancel(&fep->perout_timer); + if (fep->ptp_clock) +diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c +index 600a2f5370875..b168a37a5dfff 100644 +--- a/drivers/net/ethernet/intel/ice/ice_main.c ++++ b/drivers/net/ethernet/intel/ice/ice_main.c +@@ -557,6 +557,8 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) + if (test_bit(ICE_PREPARED_FOR_RESET, pf->state)) + return; + ++ synchronize_irq(pf->oicr_irq.virq); ++ + ice_unplug_aux_dev(pf); + + /* Notify VFs of impending reset */ +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +index 8d9743a5e42c7..79ec6fcc9e259 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +@@ -2374,6 +2374,9 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq + if (likely(wi->consumed_strides < rq->mpwqe.num_strides)) + return; + ++ if (unlikely(!cstrides)) ++ return; ++ + wq = &rq->mpwqe.wq; + wqe = mlx5_wq_ll_get_wqe(wq, wqe_id); + mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index); +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +index d5d2a4c776c1c..ded1bbda5266f 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +@@ -21,6 +21,7 @@ + #define RGMII_IO_MACRO_CONFIG2 0x1C + #define RGMII_IO_MACRO_DEBUG1 0x20 + #define EMAC_SYSTEM_LOW_POWER_DEBUG 0x28 ++#define EMAC_WRAPPER_SGMII_PHY_CNTRL1 0xf4 + + /* RGMII_IO_MACRO_CONFIG fields */ + #define RGMII_CONFIG_FUNC_CLK_EN BIT(30) +@@ -79,6 +80,9 @@ + #define ETHQOS_MAC_CTRL_SPEED_MODE BIT(14) + #define ETHQOS_MAC_CTRL_PORT_SEL BIT(15) + ++/* EMAC_WRAPPER_SGMII_PHY_CNTRL1 bits */ ++#define SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN BIT(3) ++ + #define SGMII_10M_RX_CLK_DVDR 0x31 + + struct ethqos_emac_por { +@@ -95,6 +99,7 @@ 
struct ethqos_emac_driver_data { + bool has_integrated_pcs; + u32 dma_addr_width; + struct dwmac4_addrs dwmac4_addrs; ++ bool needs_sgmii_loopback; + }; + + struct qcom_ethqos { +@@ -113,6 +118,7 @@ struct qcom_ethqos { + unsigned int num_por; + bool rgmii_config_loopback_en; + bool has_emac_ge_3; ++ bool needs_sgmii_loopback; + }; + + static int rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset) +@@ -187,8 +193,22 @@ ethqos_update_link_clk(struct qcom_ethqos *ethqos, unsigned int speed) + clk_set_rate(ethqos->link_clk, ethqos->link_clk_rate); + } + ++static void ++qcom_ethqos_set_sgmii_loopback(struct qcom_ethqos *ethqos, bool enable) ++{ ++ if (!ethqos->needs_sgmii_loopback || ++ ethqos->phy_mode != PHY_INTERFACE_MODE_2500BASEX) ++ return; ++ ++ rgmii_updatel(ethqos, ++ SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN, ++ enable ? SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN : 0, ++ EMAC_WRAPPER_SGMII_PHY_CNTRL1); ++} ++ + static void ethqos_set_func_clk_en(struct qcom_ethqos *ethqos) + { ++ qcom_ethqos_set_sgmii_loopback(ethqos, true); + rgmii_updatel(ethqos, RGMII_CONFIG_FUNC_CLK_EN, + RGMII_CONFIG_FUNC_CLK_EN, RGMII_IO_MACRO_CONFIG); + } +@@ -273,6 +293,7 @@ static const struct ethqos_emac_driver_data emac_v4_0_0_data = { + .has_emac_ge_3 = true, + .link_clk_name = "phyaux", + .has_integrated_pcs = true, ++ .needs_sgmii_loopback = true, + .dma_addr_width = 36, + .dwmac4_addrs = { + .dma_chan = 0x00008100, +@@ -646,6 +667,7 @@ static void ethqos_fix_mac_speed(void *priv, unsigned int speed, unsigned int mo + { + struct qcom_ethqos *ethqos = priv; + ++ qcom_ethqos_set_sgmii_loopback(ethqos, false); + ethqos->speed = speed; + ethqos_update_link_clk(ethqos, speed); + ethqos_configure(ethqos); +@@ -781,6 +803,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev) + ethqos->num_por = data->num_por; + ethqos->rgmii_config_loopback_en = data->rgmii_config_loopback_en; + ethqos->has_emac_ge_3 = data->has_emac_ge_3; ++ ethqos->needs_sgmii_loopback = data->needs_sgmii_loopback; + + ethqos->link_clk = devm_clk_get(dev, data->link_clk_name ?: "rgmii"); + if (IS_ERR(ethqos->link_clk)) +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c +index befbca01bfe37..b1380cf1b13ab 100644 +--- a/drivers/net/usb/qmi_wwan.c ++++ b/drivers/net/usb/qmi_wwan.c +@@ -201,6 +201,7 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb) + break; + default: + /* not ip - do not know what to do */ ++ kfree_skb(skbn); + goto skip; + } + +diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h +index c926952c956ef..33f4706af880d 100644 +--- a/drivers/net/wireless/ath/ath12k/core.h ++++ b/drivers/net/wireless/ath/ath12k/core.h +@@ -181,6 +181,8 @@ enum ath12k_dev_flags { + ATH12K_FLAG_REGISTERED, + ATH12K_FLAG_QMI_FAIL, + ATH12K_FLAG_HTC_SUSPEND_COMPLETE, ++ ATH12K_FLAG_CE_IRQ_ENABLED, ++ ATH12K_FLAG_EXT_IRQ_ENABLED, + }; + + enum ath12k_monitor_flags { +diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c +index 2c17b1e7681a5..d9bc07844fb71 100644 +--- a/drivers/net/wireless/ath/ath12k/dp_rx.c ++++ b/drivers/net/wireless/ath/ath12k/dp_rx.c +@@ -2759,6 +2759,7 @@ int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev + peer = ath12k_peer_find(ab, vdev_id, peer_mac); + if (!peer) { + spin_unlock_bh(&ab->base_lock); ++ crypto_free_shash(tfm); + ath12k_warn(ab, "failed to find the peer to set up fragment info\n"); + return -ENOENT; + } +diff --git 
a/drivers/net/wireless/ath/ath12k/hif.h b/drivers/net/wireless/ath/ath12k/hif.h +index 4cbf9b5c04b9c..c653ca1f59b22 100644 +--- a/drivers/net/wireless/ath/ath12k/hif.h ++++ b/drivers/net/wireless/ath/ath12k/hif.h +@@ -10,17 +10,17 @@ + #include "core.h" + + struct ath12k_hif_ops { +- u32 (*read32)(struct ath12k_base *sc, u32 address); +- void (*write32)(struct ath12k_base *sc, u32 address, u32 data); +- void (*irq_enable)(struct ath12k_base *sc); +- void (*irq_disable)(struct ath12k_base *sc); +- int (*start)(struct ath12k_base *sc); +- void (*stop)(struct ath12k_base *sc); +- int (*power_up)(struct ath12k_base *sc); +- void (*power_down)(struct ath12k_base *sc); ++ u32 (*read32)(struct ath12k_base *ab, u32 address); ++ void (*write32)(struct ath12k_base *ab, u32 address, u32 data); ++ void (*irq_enable)(struct ath12k_base *ab); ++ void (*irq_disable)(struct ath12k_base *ab); ++ int (*start)(struct ath12k_base *ab); ++ void (*stop)(struct ath12k_base *ab); ++ int (*power_up)(struct ath12k_base *ab); ++ void (*power_down)(struct ath12k_base *ab); + int (*suspend)(struct ath12k_base *ab); + int (*resume)(struct ath12k_base *ab); +- int (*map_service_to_pipe)(struct ath12k_base *sc, u16 service_id, ++ int (*map_service_to_pipe)(struct ath12k_base *ab, u16 service_id, + u8 *ul_pipe, u8 *dl_pipe); + int (*get_user_msi_vector)(struct ath12k_base *ab, char *user_name, + int *num_vectors, u32 *user_base_data, +diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c +index 58cd678555964..041a9602f0e15 100644 +--- a/drivers/net/wireless/ath/ath12k/pci.c ++++ b/drivers/net/wireless/ath/ath12k/pci.c +@@ -373,6 +373,8 @@ static void ath12k_pci_ce_irqs_disable(struct ath12k_base *ab) + { + int i; + ++ clear_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags); ++ + for (i = 0; i < ab->hw_params->ce_count; i++) { + if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; +@@ -406,6 +408,10 @@ static void ath12k_pci_ce_tasklet(struct tasklet_struct *t) + static irqreturn_t ath12k_pci_ce_interrupt_handler(int irq, void *arg) + { + struct ath12k_ce_pipe *ce_pipe = arg; ++ struct ath12k_base *ab = ce_pipe->ab; ++ ++ if (!test_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags)) ++ return IRQ_HANDLED; + + /* last interrupt received for this CE */ + ce_pipe->timestamp = jiffies; +@@ -424,12 +430,15 @@ static void ath12k_pci_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp) + disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]); + } + +-static void __ath12k_pci_ext_irq_disable(struct ath12k_base *sc) ++static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab) + { + int i; + ++ if (!test_and_clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags)) ++ return; ++ + for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { +- struct ath12k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i]; ++ struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; + + ath12k_pci_ext_grp_disable(irq_grp); + +@@ -483,6 +492,10 @@ static int ath12k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget) + static irqreturn_t ath12k_pci_ext_interrupt_handler(int irq, void *arg) + { + struct ath12k_ext_irq_grp *irq_grp = arg; ++ struct ath12k_base *ab = irq_grp->ab; ++ ++ if (!test_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags)) ++ return IRQ_HANDLED; + + ath12k_dbg(irq_grp->ab, ATH12K_DBG_PCI, "ext irq:%d\n", irq); + +@@ -626,6 +639,8 @@ static void ath12k_pci_ce_irqs_enable(struct ath12k_base *ab) + { + int i; + ++ set_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags); ++ + for (i = 0; i < 
ab->hw_params->ce_count; i++) { + if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; +@@ -956,6 +971,8 @@ void ath12k_pci_ext_irq_enable(struct ath12k_base *ab) + { + int i; + ++ set_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags); ++ + for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { + struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; + +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c +index 796c2a00fea4a..0fc7aa78b2e5b 100644 +--- a/drivers/nvme/host/pci.c ++++ b/drivers/nvme/host/pci.c +@@ -826,9 +826,9 @@ static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req, + struct nvme_command *cmnd) + { + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); ++ struct bio_vec bv = rq_integrity_vec(req); + +- iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req), +- rq_dma_dir(req), 0); ++ iod->meta_dma = dma_map_bvec(dev->dev, &bv, rq_dma_dir(req), 0); + if (dma_mapping_error(dev->dev, iod->meta_dma)) + return BLK_STS_IOERR; + cmnd->rw.metadata = cpu_to_le64(iod->meta_dma); +@@ -969,7 +969,7 @@ static __always_inline void nvme_pci_unmap_rq(struct request *req) + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + + dma_unmap_page(dev->dev, iod->meta_dma, +- rq_integrity_vec(req)->bv_len, rq_dma_dir(req)); ++ rq_integrity_vec(req).bv_len, rq_dma_dir(req)); + } + + if (blk_rq_nr_phys_segments(req)) +diff --git a/drivers/platform/x86/intel/ifs/core.c b/drivers/platform/x86/intel/ifs/core.c +index 306f886b52d20..4ff2aa4b484bc 100644 +--- a/drivers/platform/x86/intel/ifs/core.c ++++ b/drivers/platform/x86/intel/ifs/core.c +@@ -1,6 +1,7 @@ + // SPDX-License-Identifier: GPL-2.0-only + /* Copyright(c) 2022 Intel Corporation. */ + ++#include + #include + #include + #include +@@ -94,6 +95,8 @@ static int __init ifs_init(void) + for (i = 0; i < IFS_NUMTESTS; i++) { + if (!(msrval & BIT(ifs_devices[i].test_caps->integrity_cap_bit))) + continue; ++ ifs_devices[i].rw_data.generation = FIELD_GET(MSR_INTEGRITY_CAPS_SAF_GEN_MASK, ++ msrval); + ret = misc_register(&ifs_devices[i].misc); + if (ret) + goto err_exit; +diff --git a/drivers/platform/x86/intel/ifs/ifs.h b/drivers/platform/x86/intel/ifs/ifs.h +index 93191855890f2..6bc63ab705175 100644 +--- a/drivers/platform/x86/intel/ifs/ifs.h ++++ b/drivers/platform/x86/intel/ifs/ifs.h +@@ -174,9 +174,17 @@ union ifs_chunks_auth_status { + union ifs_scan { + u64 data; + struct { +- u32 start :8; +- u32 stop :8; +- u32 rsvd :16; ++ union { ++ struct { ++ u8 start; ++ u8 stop; ++ u16 rsvd; ++ } gen0; ++ struct { ++ u16 start; ++ u16 stop; ++ } gen2; ++ }; + u32 delay :31; + u32 sigmce :1; + }; +@@ -186,9 +194,17 @@ union ifs_scan { + union ifs_status { + u64 data; + struct { +- u32 chunk_num :8; +- u32 chunk_stop_index :8; +- u32 rsvd1 :16; ++ union { ++ struct { ++ u8 chunk_num; ++ u8 chunk_stop_index; ++ u16 rsvd1; ++ } gen0; ++ struct { ++ u16 chunk_num; ++ u16 chunk_stop_index; ++ } gen2; ++ }; + u32 error_code :8; + u32 rsvd2 :22; + u32 control_error :1; +@@ -229,6 +245,7 @@ struct ifs_test_caps { + * @status: it holds simple status pass/fail/untested + * @scan_details: opaque scan status code from h/w + * @cur_batch: number indicating the currently loaded test file ++ * @generation: IFS test generation enumerated by hardware + */ + struct ifs_data { + int loaded_version; +@@ -238,6 +255,7 @@ struct ifs_data { + int status; + u64 scan_details; + u32 cur_batch; ++ u32 generation; + }; + + struct ifs_work { +diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c 
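b/drivers/platform/x86/intel/ifs/runtest.c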
+index 43c864add778f..c7a5bf24bef35 100644 +--- a/drivers/platform/x86/intel/ifs/runtest.c ++++ b/drivers/platform/x86/intel/ifs/runtest.c +@@ -167,25 +167,35 @@ static int doscan(void *data) + */ + static void ifs_test_core(int cpu, struct device *dev) + { ++ union ifs_status status = {}; + union ifs_scan activate; +- union ifs_status status; + unsigned long timeout; + struct ifs_data *ifsd; ++ int to_start, to_stop; ++ int status_chunk; + u64 msrvals[2]; + int retries; + + ifsd = ifs_get_data(dev); + +- activate.rsvd = 0; ++ activate.gen0.rsvd = 0; + activate.delay = IFS_THREAD_WAIT; + activate.sigmce = 0; +- activate.start = 0; +- activate.stop = ifsd->valid_chunks - 1; ++ to_start = 0; ++ to_stop = ifsd->valid_chunks - 1; ++ ++ if (ifsd->generation) { ++ activate.gen2.start = to_start; ++ activate.gen2.stop = to_stop; ++ } else { ++ activate.gen0.start = to_start; ++ activate.gen0.stop = to_stop; ++ } + + timeout = jiffies + HZ / 2; + retries = MAX_IFS_RETRIES; + +- while (activate.start <= activate.stop) { ++ while (to_start <= to_stop) { + if (time_after(jiffies, timeout)) { + status.error_code = IFS_SW_TIMEOUT; + break; +@@ -196,13 +206,14 @@ static void ifs_test_core(int cpu, struct device *dev) + + status.data = msrvals[1]; + +- trace_ifs_status(cpu, activate, status); ++ trace_ifs_status(cpu, to_start, to_stop, status.data); + + /* Some cases can be retried, give up for others */ + if (!can_restart(status)) + break; + +- if (status.chunk_num == activate.start) { ++ status_chunk = ifsd->generation ? status.gen2.chunk_num : status.gen0.chunk_num; ++ if (status_chunk == to_start) { + /* Check for forward progress */ + if (--retries == 0) { + if (status.error_code == IFS_NO_ERROR) +@@ -211,7 +222,11 @@ static void ifs_test_core(int cpu, struct device *dev) + } + } else { + retries = MAX_IFS_RETRIES; +- activate.start = status.chunk_num; ++ if (ifsd->generation) ++ activate.gen2.start = status_chunk; ++ else ++ activate.gen0.start = status_chunk; ++ to_start = status_chunk; + } + } + +diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c +index b5903193e2f96..ac05942e4e6ac 100644 +--- a/drivers/power/supply/axp288_charger.c ++++ b/drivers/power/supply/axp288_charger.c +@@ -178,18 +178,18 @@ static inline int axp288_charger_set_cv(struct axp288_chrg_info *info, int cv) + u8 reg_val; + int ret; + +- if (cv <= CV_4100MV) { +- reg_val = CHRG_CCCV_CV_4100MV; +- cv = CV_4100MV; +- } else if (cv <= CV_4150MV) { +- reg_val = CHRG_CCCV_CV_4150MV; +- cv = CV_4150MV; +- } else if (cv <= CV_4200MV) { ++ if (cv >= CV_4350MV) { ++ reg_val = CHRG_CCCV_CV_4350MV; ++ cv = CV_4350MV; ++ } else if (cv >= CV_4200MV) { + reg_val = CHRG_CCCV_CV_4200MV; + cv = CV_4200MV; ++ } else if (cv >= CV_4150MV) { ++ reg_val = CHRG_CCCV_CV_4150MV; ++ cv = CV_4150MV; + } else { +- reg_val = CHRG_CCCV_CV_4350MV; +- cv = CV_4350MV; ++ reg_val = CHRG_CCCV_CV_4100MV; ++ cv = CV_4100MV; + } + + reg_val = reg_val << CHRG_CCCV_CV_BIT_POS; +@@ -337,8 +337,8 @@ static int axp288_charger_usb_set_property(struct power_supply *psy, + } + break; + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: +- scaled_val = min(val->intval, info->max_cv); +- scaled_val = DIV_ROUND_CLOSEST(scaled_val, 1000); ++ scaled_val = DIV_ROUND_CLOSEST(val->intval, 1000); ++ scaled_val = min(scaled_val, info->max_cv); + ret = axp288_charger_set_cv(info, scaled_val); + if (ret < 0) { + dev_warn(&info->pdev->dev, "set charge voltage failed\n"); +diff --git a/drivers/power/supply/qcom_battmgr.c 
b/drivers/power/supply/qcom_battmgr.c +index ec163d1bcd189..44c6301f5f174 100644 +--- a/drivers/power/supply/qcom_battmgr.c ++++ b/drivers/power/supply/qcom_battmgr.c +@@ -486,7 +486,7 @@ static int qcom_battmgr_bat_get_property(struct power_supply *psy, + int ret; + + if (!battmgr->service_up) +- return -ENODEV; ++ return -EAGAIN; + + if (battmgr->variant == QCOM_BATTMGR_SC8280XP) + ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp); +@@ -683,7 +683,7 @@ static int qcom_battmgr_ac_get_property(struct power_supply *psy, + int ret; + + if (!battmgr->service_up) +- return -ENODEV; ++ return -EAGAIN; + + ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp); + if (ret) +@@ -748,7 +748,7 @@ static int qcom_battmgr_usb_get_property(struct power_supply *psy, + int ret; + + if (!battmgr->service_up) +- return -ENODEV; ++ return -EAGAIN; + + if (battmgr->variant == QCOM_BATTMGR_SC8280XP) + ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp); +@@ -867,7 +867,7 @@ static int qcom_battmgr_wls_get_property(struct power_supply *psy, + int ret; + + if (!battmgr->service_up) +- return -ENODEV; ++ return -EAGAIN; + + if (battmgr->variant == QCOM_BATTMGR_SC8280XP) + ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp); +diff --git a/drivers/s390/char/sclp_sd.c b/drivers/s390/char/sclp_sd.c +index f9e164be7568f..944e75beb160c 100644 +--- a/drivers/s390/char/sclp_sd.c ++++ b/drivers/s390/char/sclp_sd.c +@@ -320,8 +320,14 @@ static int sclp_sd_store_data(struct sclp_sd_data *result, u8 di) + &esize); + if (rc) { + /* Cancel running request if interrupted */ +- if (rc == -ERESTARTSYS) +- sclp_sd_sync(page, SD_EQ_HALT, di, 0, 0, NULL, NULL); ++ if (rc == -ERESTARTSYS) { ++ if (sclp_sd_sync(page, SD_EQ_HALT, di, 0, 0, NULL, NULL)) { ++ pr_warn("Could not stop Store Data request - leaking at least %zu bytes\n", ++ (size_t)dsize * PAGE_SIZE); ++ data = NULL; ++ asce = 0; ++ } ++ } + vfree(data); + goto out; + } +diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c +index 80d71041086e1..7f32619234696 100644 +--- a/drivers/scsi/mpi3mr/mpi3mr_os.c ++++ b/drivers/scsi/mpi3mr/mpi3mr_os.c +@@ -3447,6 +3447,17 @@ static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc, + scmd->sc_data_direction); + priv->meta_sg_valid = 1; /* To unmap meta sg DMA */ + } else { ++ /* ++ * Some firmware versions byte-swap the REPORT ZONES command ++ * reply from ATA-ZAC devices by directly accessing in the host ++ * buffer. This does not respect the default command DMA ++ * direction and causes IOMMU page faults on some architectures ++ * with an IOMMU enforcing write mappings (e.g. AMD hosts). ++ * Avoid such issue by making the REPORT ZONES buffer mapping ++ * bi-directional. ++ */ ++ if (scmd->cmnd[0] == ZBC_IN && scmd->cmnd[1] == ZI_REPORT_ZONES) ++ scmd->sc_data_direction = DMA_BIDIRECTIONAL; + sg_scmd = scsi_sglist(scmd); + sges_left = scsi_dma_map(scmd); + } +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c +index 04116e02ffe8c..8acf586dc8b2e 100644 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.c ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c +@@ -2671,6 +2671,22 @@ _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr) + _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1); + } + ++static inline int _base_scsi_dma_map(struct scsi_cmnd *cmd) ++{ ++ /* ++ * Some firmware versions byte-swap the REPORT ZONES command reply from ++ * ATA-ZAC devices by directly accessing in the host buffer. 
This does ++ * not respect the default command DMA direction and causes IOMMU page ++ * faults on some architectures with an IOMMU enforcing write mappings ++ * (e.g. AMD hosts). Avoid such issue by making the report zones buffer ++ * mapping bi-directional. ++ */ ++ if (cmd->cmnd[0] == ZBC_IN && cmd->cmnd[1] == ZI_REPORT_ZONES) ++ cmd->sc_data_direction = DMA_BIDIRECTIONAL; ++ ++ return scsi_dma_map(cmd); ++} ++ + /** + * _base_build_sg_scmd - main sg creation routine + * pcie_device is unused here! +@@ -2717,7 +2733,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc, + sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; + + sg_scmd = scsi_sglist(scmd); +- sges_left = scsi_dma_map(scmd); ++ sges_left = _base_scsi_dma_map(scmd); + if (sges_left < 0) + return -ENOMEM; + +@@ -2861,7 +2877,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc, + } + + sg_scmd = scsi_sglist(scmd); +- sges_left = scsi_dma_map(scmd); ++ sges_left = _base_scsi_dma_map(scmd); + if (sges_left < 0) + return -ENOMEM; + +diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c +index 079035db7dd85..3c0f7dc9614d1 100644 +--- a/drivers/spi/spi-fsl-lpspi.c ++++ b/drivers/spi/spi-fsl-lpspi.c +@@ -296,7 +296,7 @@ static void fsl_lpspi_set_watermark(struct fsl_lpspi_data *fsl_lpspi) + static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi) + { + struct lpspi_config config = fsl_lpspi->config; +- unsigned int perclk_rate, scldiv; ++ unsigned int perclk_rate, scldiv, div; + u8 prescale; + + perclk_rate = clk_get_rate(fsl_lpspi->clk_per); +@@ -313,8 +313,10 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi) + return -EINVAL; + } + ++ div = DIV_ROUND_UP(perclk_rate, config.speed_hz); ++ + for (prescale = 0; prescale < 8; prescale++) { +- scldiv = perclk_rate / config.speed_hz / (1 << prescale) - 2; ++ scldiv = div / (1 << prescale) - 2; + if (scldiv < 256) { + fsl_lpspi->config.prescale = prescale; + break; +diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c +index 1a8dd10012448..b97206d47ec6d 100644 +--- a/drivers/spi/spidev.c ++++ b/drivers/spi/spidev.c +@@ -704,6 +704,7 @@ static const struct file_operations spidev_fops = { + static struct class *spidev_class; + + static const struct spi_device_id spidev_spi_ids[] = { ++ { .name = "bh2228fv" }, + { .name = "dh2228fv" }, + { .name = "ltc2488" }, + { .name = "sx1301" }, +diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c +index 2eceef54e0b30..ed8798fdf522a 100644 +--- a/drivers/tty/serial/serial_core.c ++++ b/drivers/tty/serial/serial_core.c +@@ -876,6 +876,14 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port, + new_flags = (__force upf_t)new_info->flags; + old_custom_divisor = uport->custom_divisor; + ++ if (!(uport->flags & UPF_FIXED_PORT)) { ++ unsigned int uartclk = new_info->baud_base * 16; ++ /* check needs to be done here before other settings made */ ++ if (uartclk == 0) { ++ retval = -EINVAL; ++ goto exit; ++ } ++ } + if (!capable(CAP_SYS_ADMIN)) { + retval = -EPERM; + if (change_irq || change_port || +diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c +index 808979a093505..94edac17b95f8 100644 +--- a/drivers/ufs/core/ufshcd.c ++++ b/drivers/ufs/core/ufshcd.c +@@ -3971,11 +3971,16 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba) + min_sleep_time_us = + MIN_DELAY_BEFORE_DME_CMDS_US - delta; + else +- return; /* no more delay required */ ++ min_sleep_time_us = 0; /* no more delay required */ + } + +- /* allow sleep for extra 
50us if needed */ +- usleep_range(min_sleep_time_us, min_sleep_time_us + 50); ++ if (min_sleep_time_us > 0) { ++ /* allow sleep for extra 50us if needed */ ++ usleep_range(min_sleep_time_us, min_sleep_time_us + 50); ++ } ++ ++ /* update the last_dme_cmd_tstamp */ ++ hba->last_dme_cmd_tstamp = ktime_get(); + } + + /** +@@ -10157,9 +10162,6 @@ int ufshcd_system_restore(struct device *dev) + */ + ufshcd_readl(hba, REG_UTP_TASK_REQ_LIST_BASE_H); + +- /* Resuming from hibernate, assume that link was OFF */ +- ufshcd_set_link_off(hba); +- + return 0; + + } +diff --git a/drivers/usb/gadget/function/f_midi2.c b/drivers/usb/gadget/function/f_midi2.c +index 0e38bb145e8f5..6908fdd4a83f3 100644 +--- a/drivers/usb/gadget/function/f_midi2.c ++++ b/drivers/usb/gadget/function/f_midi2.c +@@ -642,12 +642,21 @@ static void process_ump_stream_msg(struct f_midi2_ep *ep, const u32 *data) + if (format) + return; // invalid + blk = (*data >> 8) & 0xff; +- if (blk >= ep->num_blks) +- return; +- if (*data & UMP_STREAM_MSG_REQUEST_FB_INFO) +- reply_ump_stream_fb_info(ep, blk); +- if (*data & UMP_STREAM_MSG_REQUEST_FB_NAME) +- reply_ump_stream_fb_name(ep, blk); ++ if (blk == 0xff) { ++ /* inquiry for all blocks */ ++ for (blk = 0; blk < ep->num_blks; blk++) { ++ if (*data & UMP_STREAM_MSG_REQUEST_FB_INFO) ++ reply_ump_stream_fb_info(ep, blk); ++ if (*data & UMP_STREAM_MSG_REQUEST_FB_NAME) ++ reply_ump_stream_fb_name(ep, blk); ++ } ++ } else if (blk < ep->num_blks) { ++ /* only the specified block */ ++ if (*data & UMP_STREAM_MSG_REQUEST_FB_INFO) ++ reply_ump_stream_fb_info(ep, blk); ++ if (*data & UMP_STREAM_MSG_REQUEST_FB_NAME) ++ reply_ump_stream_fb_name(ep, blk); ++ } + return; + } + } +diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c +index ec1dceb087293..0be0966973c7f 100644 +--- a/drivers/usb/gadget/function/u_audio.c ++++ b/drivers/usb/gadget/function/u_audio.c +@@ -592,16 +592,25 @@ int u_audio_start_capture(struct g_audio *audio_dev) + struct usb_ep *ep, *ep_fback; + struct uac_rtd_params *prm; + struct uac_params *params = &audio_dev->params; +- int req_len, i; ++ int req_len, i, ret; + + prm = &uac->c_prm; + dev_dbg(dev, "start capture with rate %d\n", prm->srate); + ep = audio_dev->out_ep; +- config_ep_by_speed(gadget, &audio_dev->func, ep); ++ ret = config_ep_by_speed(gadget, &audio_dev->func, ep); ++ if (ret < 0) { ++ dev_err(dev, "config_ep_by_speed for out_ep failed (%d)\n", ret); ++ return ret; ++ } ++ + req_len = ep->maxpacket; + + prm->ep_enabled = true; +- usb_ep_enable(ep); ++ ret = usb_ep_enable(ep); ++ if (ret < 0) { ++ dev_err(dev, "usb_ep_enable failed for out_ep (%d)\n", ret); ++ return ret; ++ } + + for (i = 0; i < params->req_number; i++) { + if (!prm->reqs[i]) { +@@ -629,9 +638,18 @@ int u_audio_start_capture(struct g_audio *audio_dev) + return 0; + + /* Setup feedback endpoint */ +- config_ep_by_speed(gadget, &audio_dev->func, ep_fback); ++ ret = config_ep_by_speed(gadget, &audio_dev->func, ep_fback); ++ if (ret < 0) { ++ dev_err(dev, "config_ep_by_speed in_ep_fback failed (%d)\n", ret); ++ return ret; // TODO: Clean up out_ep ++ } ++ + prm->fb_ep_enabled = true; +- usb_ep_enable(ep_fback); ++ ret = usb_ep_enable(ep_fback); ++ if (ret < 0) { ++ dev_err(dev, "usb_ep_enable failed for in_ep_fback (%d)\n", ret); ++ return ret; // TODO: Clean up out_ep ++ } + req_len = ep_fback->maxpacket; + + req_fback = usb_ep_alloc_request(ep_fback, GFP_ATOMIC); +@@ -687,13 +705,17 @@ int u_audio_start_playback(struct g_audio *audio_dev) + struct uac_params 
*params = &audio_dev->params; + unsigned int factor; + const struct usb_endpoint_descriptor *ep_desc; +- int req_len, i; ++ int req_len, i, ret; + unsigned int p_pktsize; + + prm = &uac->p_prm; + dev_dbg(dev, "start playback with rate %d\n", prm->srate); + ep = audio_dev->in_ep; +- config_ep_by_speed(gadget, &audio_dev->func, ep); ++ ret = config_ep_by_speed(gadget, &audio_dev->func, ep); ++ if (ret < 0) { ++ dev_err(dev, "config_ep_by_speed for in_ep failed (%d)\n", ret); ++ return ret; ++ } + + ep_desc = ep->desc; + /* +@@ -720,7 +742,11 @@ int u_audio_start_playback(struct g_audio *audio_dev) + uac->p_residue_mil = 0; + + prm->ep_enabled = true; +- usb_ep_enable(ep); ++ ret = usb_ep_enable(ep); ++ if (ret < 0) { ++ dev_err(dev, "usb_ep_enable failed for in_ep (%d)\n", ret); ++ return ret; ++ } + + for (i = 0; i < params->req_number; i++) { + if (!prm->reqs[i]) { +diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c +index a92eb6d909768..8962f96ae7294 100644 +--- a/drivers/usb/gadget/function/u_serial.c ++++ b/drivers/usb/gadget/function/u_serial.c +@@ -1441,6 +1441,7 @@ void gserial_suspend(struct gserial *gser) + spin_lock(&port->port_lock); + spin_unlock(&serial_port_lock); + port->suspended = true; ++ port->start_delayed = true; + spin_unlock_irqrestore(&port->port_lock, flags); + } + EXPORT_SYMBOL_GPL(gserial_suspend); +diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c +index 358394fc3db93..9886e1cb13985 100644 +--- a/drivers/usb/gadget/udc/core.c ++++ b/drivers/usb/gadget/udc/core.c +@@ -118,12 +118,10 @@ int usb_ep_enable(struct usb_ep *ep) + goto out; + + /* UDC drivers can't handle endpoints with maxpacket size 0 */ +- if (usb_endpoint_maxp(ep->desc) == 0) { +- /* +- * We should log an error message here, but we can't call +- * dev_err() because there's no way to find the gadget +- * given only ep. +- */ ++ if (!ep->desc || usb_endpoint_maxp(ep->desc) == 0) { ++ WARN_ONCE(1, "%s: ep%d (%s) has %s\n", __func__, ep->address, ep->name, ++ (!ep->desc) ? 
"NULL descriptor" : "maxpacket 0"); ++ + ret = -EINVAL; + goto out; + } +diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c +index 6934970f180d7..5a8869cd95d52 100644 +--- a/drivers/usb/serial/usb_debug.c ++++ b/drivers/usb/serial/usb_debug.c +@@ -76,6 +76,11 @@ static void usb_debug_process_read_urb(struct urb *urb) + usb_serial_generic_process_read_urb(urb); + } + ++static void usb_debug_init_termios(struct tty_struct *tty) ++{ ++ tty->termios.c_lflag &= ~(ECHO | ECHONL); ++} ++ + static struct usb_serial_driver debug_device = { + .driver = { + .owner = THIS_MODULE, +@@ -85,6 +90,7 @@ static struct usb_serial_driver debug_device = { + .num_ports = 1, + .bulk_out_size = USB_DEBUG_MAX_PACKET_SIZE, + .break_ctl = usb_debug_break_ctl, ++ .init_termios = usb_debug_init_termios, + .process_read_urb = usb_debug_process_read_urb, + }; + +@@ -96,6 +102,7 @@ static struct usb_serial_driver dbc_device = { + .id_table = dbc_id_table, + .num_ports = 1, + .break_ctl = usb_debug_break_ctl, ++ .init_termios = usb_debug_init_termios, + .process_read_urb = usb_debug_process_read_urb, + }; + +diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c +index 37d1fc34e8a56..14a5f55f24fc8 100644 +--- a/drivers/usb/usbip/vhci_hcd.c ++++ b/drivers/usb/usbip/vhci_hcd.c +@@ -745,6 +745,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag + * + */ + if (usb_pipedevice(urb->pipe) == 0) { ++ struct usb_device *old; + __u8 type = usb_pipetype(urb->pipe); + struct usb_ctrlrequest *ctrlreq = + (struct usb_ctrlrequest *) urb->setup_packet; +@@ -755,14 +756,15 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag + goto no_need_xmit; + } + ++ old = vdev->udev; + switch (ctrlreq->bRequest) { + case USB_REQ_SET_ADDRESS: + /* set_address may come when a device is reset */ + dev_info(dev, "SetAddress Request (%d) to port %d\n", + ctrlreq->wValue, vdev->rhport); + +- usb_put_dev(vdev->udev); + vdev->udev = usb_get_dev(urb->dev); ++ usb_put_dev(old); + + spin_lock(&vdev->ud.lock); + vdev->ud.status = VDEV_ST_USED; +@@ -781,8 +783,8 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag + usbip_dbg_vhci_hc( + "Not yet?:Get_Descriptor to device 0 (get max pipe size)\n"); + +- usb_put_dev(vdev->udev); + vdev->udev = usb_get_dev(urb->dev); ++ usb_put_dev(old); + goto out; + + default: +@@ -1067,6 +1069,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud) + static void vhci_device_reset(struct usbip_device *ud) + { + struct vhci_device *vdev = container_of(ud, struct vhci_device, ud); ++ struct usb_device *old = vdev->udev; + unsigned long flags; + + spin_lock_irqsave(&ud->lock, flags); +@@ -1074,8 +1077,8 @@ static void vhci_device_reset(struct usbip_device *ud) + vdev->speed = 0; + vdev->devid = 0; + +- usb_put_dev(vdev->udev); + vdev->udev = NULL; ++ usb_put_dev(old); + + if (ud->tcp_socket) { + sockfd_put(ud->tcp_socket); +diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c +index fb590e346e43d..da2c31ccc1380 100644 +--- a/drivers/vhost/vdpa.c ++++ b/drivers/vhost/vdpa.c +@@ -1378,13 +1378,7 @@ static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf) + + notify = ops->get_vq_notification(vdpa, index); + +- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); +- if (remap_pfn_range(vma, vmf->address & PAGE_MASK, +- PFN_DOWN(notify.addr), PAGE_SIZE, +- vma->vm_page_prot)) +- return VM_FAULT_SIGBUS; +- +- return VM_FAULT_NOPAGE; ++ return vmf_insert_pfn(vma, vmf->address & 
PAGE_MASK, PFN_DOWN(notify.addr)); + } + + static const struct vm_operations_struct vhost_vdpa_vm_ops = { +diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c +index da88173bac432..923f064c7e3e9 100644 +--- a/drivers/xen/privcmd.c ++++ b/drivers/xen/privcmd.c +@@ -841,7 +841,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file, + #ifdef CONFIG_XEN_PRIVCMD_IRQFD + /* Irqfd support */ + static struct workqueue_struct *irqfd_cleanup_wq; +-static DEFINE_MUTEX(irqfds_lock); ++static DEFINE_SPINLOCK(irqfds_lock); + static LIST_HEAD(irqfds_list); + + struct privcmd_kernel_irqfd { +@@ -905,9 +905,11 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key) + irqfd_inject(kirqfd); + + if (flags & EPOLLHUP) { +- mutex_lock(&irqfds_lock); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&irqfds_lock, flags); + irqfd_deactivate(kirqfd); +- mutex_unlock(&irqfds_lock); ++ spin_unlock_irqrestore(&irqfds_lock, flags); + } + + return 0; +@@ -925,6 +927,7 @@ irqfd_poll_func(struct file *file, wait_queue_head_t *wqh, poll_table *pt) + static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd) + { + struct privcmd_kernel_irqfd *kirqfd, *tmp; ++ unsigned long flags; + __poll_t events; + struct fd f; + void *dm_op; +@@ -964,18 +967,18 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd) + init_waitqueue_func_entry(&kirqfd->wait, irqfd_wakeup); + init_poll_funcptr(&kirqfd->pt, irqfd_poll_func); + +- mutex_lock(&irqfds_lock); ++ spin_lock_irqsave(&irqfds_lock, flags); + + list_for_each_entry(tmp, &irqfds_list, list) { + if (kirqfd->eventfd == tmp->eventfd) { + ret = -EBUSY; +- mutex_unlock(&irqfds_lock); ++ spin_unlock_irqrestore(&irqfds_lock, flags); + goto error_eventfd; + } + } + + list_add_tail(&kirqfd->list, &irqfds_list); +- mutex_unlock(&irqfds_lock); ++ spin_unlock_irqrestore(&irqfds_lock, flags); + + /* + * Check if there was an event already pending on the eventfd before we +@@ -1007,12 +1010,13 @@ static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd) + { + struct privcmd_kernel_irqfd *kirqfd; + struct eventfd_ctx *eventfd; ++ unsigned long flags; + + eventfd = eventfd_ctx_fdget(irqfd->fd); + if (IS_ERR(eventfd)) + return PTR_ERR(eventfd); + +- mutex_lock(&irqfds_lock); ++ spin_lock_irqsave(&irqfds_lock, flags); + + list_for_each_entry(kirqfd, &irqfds_list, list) { + if (kirqfd->eventfd == eventfd) { +@@ -1021,7 +1025,7 @@ static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd) + } + } + +- mutex_unlock(&irqfds_lock); ++ spin_unlock_irqrestore(&irqfds_lock, flags); + + eventfd_ctx_put(eventfd); + +@@ -1069,13 +1073,14 @@ static int privcmd_irqfd_init(void) + static void privcmd_irqfd_exit(void) + { + struct privcmd_kernel_irqfd *kirqfd, *tmp; ++ unsigned long flags; + +- mutex_lock(&irqfds_lock); ++ spin_lock_irqsave(&irqfds_lock, flags); + + list_for_each_entry_safe(kirqfd, tmp, &irqfds_list, list) + irqfd_deactivate(kirqfd); + +- mutex_unlock(&irqfds_lock); ++ spin_unlock_irqrestore(&irqfds_lock, flags); + + destroy_workqueue(irqfd_cleanup_wq); + } +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h +index 06333a74d6c4c..86c7f8ce1715e 100644 +--- a/fs/btrfs/ctree.h ++++ b/fs/btrfs/ctree.h +@@ -445,6 +445,7 @@ struct btrfs_file_private { + void *filldir_buf; + u64 last_index; + struct extent_state *llseek_cached_state; ++ bool fsync_skip_inode_lock; + }; + + static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info) +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c +index 9fbffd84b16c5..c6a95dfa59c81 100644 +--- 
a/fs/btrfs/extent_io.c ++++ b/fs/btrfs/extent_io.c +@@ -2172,10 +2172,8 @@ void extent_write_locked_range(struct inode *inode, struct page *locked_page, + + page = find_get_page(mapping, cur >> PAGE_SHIFT); + ASSERT(PageLocked(page)); +- if (pages_dirty && page != locked_page) { ++ if (pages_dirty && page != locked_page) + ASSERT(PageDirty(page)); +- clear_page_dirty_for_io(page); +- } + + ret = __extent_writepage_io(BTRFS_I(inode), page, &bio_ctrl, + i_size, &nr); +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c +index c997b790568fa..952cf145c6295 100644 +--- a/fs/btrfs/file.c ++++ b/fs/btrfs/file.c +@@ -1535,21 +1535,37 @@ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from) + * So here we disable page faults in the iov_iter and then retry if we + * got -EFAULT, faulting in the pages before the retry. + */ ++again: + from->nofault = true; + dio = btrfs_dio_write(iocb, from, written); + from->nofault = false; + +- /* +- * iomap_dio_complete() will call btrfs_sync_file() if we have a dsync +- * iocb, and that needs to lock the inode. So unlock it before calling +- * iomap_dio_complete() to avoid a deadlock. +- */ +- btrfs_inode_unlock(BTRFS_I(inode), ilock_flags); +- +- if (IS_ERR_OR_NULL(dio)) ++ if (IS_ERR_OR_NULL(dio)) { + err = PTR_ERR_OR_ZERO(dio); +- else ++ } else { ++ struct btrfs_file_private stack_private = { 0 }; ++ struct btrfs_file_private *private; ++ const bool have_private = (file->private_data != NULL); ++ ++ if (!have_private) ++ file->private_data = &stack_private; ++ ++ /* ++ * If we have a synchronous write, we must make sure the fsync ++ * triggered by the iomap_dio_complete() call below doesn't ++ * deadlock on the inode lock - we are already holding it and we ++ * can't call it after unlocking because we may need to complete ++ * partial writes due to the input buffer (or parts of it) not ++ * being already faulted in. ++ */ ++ private = file->private_data; ++ private->fsync_skip_inode_lock = true; + err = iomap_dio_complete(dio); ++ private->fsync_skip_inode_lock = false; ++ ++ if (!have_private) ++ file->private_data = NULL; ++ } + + /* No increment (+=) because iomap returns a cumulative value. */ + if (err > 0) +@@ -1576,10 +1592,12 @@ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from) + } else { + fault_in_iov_iter_readable(from, left); + prev_left = left; +- goto relock; ++ goto again; + } + } + ++ btrfs_inode_unlock(BTRFS_I(inode), ilock_flags); ++ + /* + * If 'err' is -ENOTBLK or we have not written all data, then it means + * we must fallback to buffered IO. +@@ -1778,6 +1796,7 @@ static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx) + */ + int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) + { ++ struct btrfs_file_private *private = file->private_data; + struct dentry *dentry = file_dentry(file); + struct inode *inode = d_inode(dentry); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); +@@ -1787,6 +1806,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) + int ret = 0, err; + u64 len; + bool full_sync; ++ const bool skip_ilock = (private ?
private->fsync_skip_inode_lock : false); + + trace_btrfs_sync_file(file, datasync); + +@@ -1814,7 +1834,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) + if (ret) + goto out; + +- btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP); ++ if (skip_ilock) ++ down_write(&BTRFS_I(inode)->i_mmap_lock); ++ else ++ btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP); + + atomic_inc(&root->log_batch); + +@@ -1838,7 +1861,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) + */ + ret = start_ordered_ops(inode, start, end); + if (ret) { +- btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP); ++ if (skip_ilock) ++ up_write(&BTRFS_I(inode)->i_mmap_lock); ++ else ++ btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP); + goto out; + } + +@@ -1941,7 +1967,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) + * file again, but that will end up using the synchronization + * inside btrfs_sync_log to keep things safe. + */ +- btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP); ++ if (skip_ilock) ++ up_write(&BTRFS_I(inode)->i_mmap_lock); ++ else ++ btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP); + + if (ret == BTRFS_NO_LOG_SYNC) { + ret = btrfs_end_transaction(trans); +@@ -2009,7 +2038,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) + + out_release_extents: + btrfs_release_log_ctx_extents(&ctx); +- btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP); ++ if (skip_ilock) ++ up_write(&BTRFS_I(inode)->i_mmap_lock); ++ else ++ btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP); + goto out; + } + +diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c +index f59e599766662..3e141c4dd2630 100644 +--- a/fs/btrfs/free-space-cache.c ++++ b/fs/btrfs/free-space-cache.c +@@ -855,6 +855,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, + spin_unlock(&ctl->tree_lock); + btrfs_err(fs_info, + "Duplicate entries in free space cache, dumping"); ++ kmem_cache_free(btrfs_free_space_bitmap_cachep, e->bitmap); + kmem_cache_free(btrfs_free_space_cachep, e); + goto free_cache; + } +diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c +index 0c93439e929fb..815a5fc3ff9d8 100644 +--- a/fs/btrfs/print-tree.c ++++ b/fs/btrfs/print-tree.c +@@ -12,7 +12,7 @@ + + struct root_name_map { + u64 id; +- char name[16]; ++ const char *name; + }; + + static const struct root_name_map root_map[] = { +diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c +index 012d9259ff532..a604aa1d23aed 100644 +--- a/fs/ext4/inline.c ++++ b/fs/ext4/inline.c +@@ -1411,7 +1411,11 @@ int ext4_inlinedir_to_tree(struct file *dir_file, + hinfo->hash = EXT4_DIRENT_HASH(de); + hinfo->minor_hash = EXT4_DIRENT_MINOR_HASH(de); + } else { +- ext4fs_dirhash(dir, de->name, de->name_len, hinfo); ++ err = ext4fs_dirhash(dir, de->name, de->name_len, hinfo); ++ if (err) { ++ ret = err; ++ goto out; ++ } + } + if ((hinfo->hash < start_hash) || + ((hinfo->hash == start_hash) && +diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c +index 0168d28427077..57264eb4d9da3 100644 +--- a/fs/jbd2/journal.c ++++ b/fs/jbd2/journal.c +@@ -399,6 +399,7 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction, + tmp = jbd2_alloc(bh_in->b_size, GFP_NOFS); + if (!tmp) { + brelse(new_bh); ++ free_buffer_head(new_bh); + return -ENOMEM; + } + spin_lock(&jh_in->b_state_lock); +diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c +index c71ae5c043060..4a20e92474b23 100644 +--- 
a/fs/smb/client/cifs_debug.c ++++ b/fs/smb/client/cifs_debug.c +@@ -1072,7 +1072,7 @@ static int cifs_security_flags_proc_open(struct inode *inode, struct file *file) + static void + cifs_security_flags_handle_must_flags(unsigned int *flags) + { +- unsigned int signflags = *flags & CIFSSEC_MUST_SIGN; ++ unsigned int signflags = *flags & (CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL); + + if ((*flags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5) + *flags = CIFSSEC_MUST_KRB5; +diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h +index 53e00255d96b6..54a84003950a4 100644 +--- a/fs/smb/client/cifsglob.h ++++ b/fs/smb/client/cifsglob.h +@@ -1922,7 +1922,7 @@ static inline bool is_replayable_error(int error) + #define CIFSSEC_MAY_SIGN 0x00001 + #define CIFSSEC_MAY_NTLMV2 0x00004 + #define CIFSSEC_MAY_KRB5 0x00008 +-#define CIFSSEC_MAY_SEAL 0x00040 /* not supported yet */ ++#define CIFSSEC_MAY_SEAL 0x00040 + #define CIFSSEC_MAY_NTLMSSP 0x00080 /* raw ntlmssp with ntlmv2 */ + + #define CIFSSEC_MUST_SIGN 0x01001 +@@ -1932,11 +1932,11 @@ require use of the stronger protocol */ + #define CIFSSEC_MUST_NTLMV2 0x04004 + #define CIFSSEC_MUST_KRB5 0x08008 + #ifdef CONFIG_CIFS_UPCALL +-#define CIFSSEC_MASK 0x8F08F /* flags supported if no weak allowed */ ++#define CIFSSEC_MASK 0xCF0CF /* flags supported if no weak allowed */ + #else +-#define CIFSSEC_MASK 0x87087 /* flags supported if no weak allowed */ ++#define CIFSSEC_MASK 0xC70C7 /* flags supported if no weak allowed */ + #endif /* UPCALL */ +-#define CIFSSEC_MUST_SEAL 0x40040 /* not supported yet */ ++#define CIFSSEC_MUST_SEAL 0x40040 + #define CIFSSEC_MUST_NTLMSSP 0x80080 /* raw ntlmssp with ntlmv2 */ + + #define CIFSSEC_DEF (CIFSSEC_MAY_SIGN | CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_NTLMSSP | CIFSSEC_MAY_SEAL) +diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c +index 9cdbc3ccc1d14..e74ba047902d8 100644 +--- a/fs/smb/client/inode.c ++++ b/fs/smb/client/inode.c +@@ -1023,13 +1023,26 @@ static int reparse_info_to_fattr(struct cifs_open_info_data *data, + } + + rc = -EOPNOTSUPP; +- switch ((data->reparse.tag = tag)) { +- case 0: /* SMB1 symlink */ ++ data->reparse.tag = tag; ++ if (!data->reparse.tag) { + if (server->ops->query_symlink) { + rc = server->ops->query_symlink(xid, tcon, + cifs_sb, full_path, + &data->symlink_target); + } ++ if (rc == -EOPNOTSUPP) ++ data->reparse.tag = IO_REPARSE_TAG_INTERNAL; ++ } ++ ++ switch (data->reparse.tag) { ++ case 0: /* SMB1 symlink */ ++ break; ++ case IO_REPARSE_TAG_INTERNAL: ++ rc = 0; ++ if (le32_to_cpu(data->fi.Attributes) & ATTR_DIRECTORY) { ++ cifs_create_junction_fattr(fattr, sb); ++ goto out; ++ } + break; + case IO_REPARSE_TAG_MOUNT_POINT: + cifs_create_junction_fattr(fattr, sb); +diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c +index 07c468ddb88a8..65d4b72b4d51a 100644 +--- a/fs/smb/client/misc.c ++++ b/fs/smb/client/misc.c +@@ -1288,6 +1288,7 @@ int cifs_inval_name_dfs_link_error(const unsigned int xid, + const char *full_path, + bool *islink) + { ++ struct TCP_Server_Info *server = tcon->ses->server; + struct cifs_ses *ses = tcon->ses; + size_t len; + char *path; +@@ -1304,12 +1305,12 @@ int cifs_inval_name_dfs_link_error(const unsigned int xid, + !is_tcon_dfs(tcon)) + return 0; + +- spin_lock(&tcon->tc_lock); +- if (!tcon->origin_fullpath) { +- spin_unlock(&tcon->tc_lock); ++ spin_lock(&server->srv_lock); ++ if (!server->leaf_fullpath) { ++ spin_unlock(&server->srv_lock); + return 0; + } +- spin_unlock(&tcon->tc_lock); ++ spin_unlock(&server->srv_lock); + + /* + * Slow path - tcon is 
DFS and @full_path has prefix path, so attempt +diff --git a/fs/smb/client/reparse.c b/fs/smb/client/reparse.c +index a0ffbda907331..689d8a506d459 100644 +--- a/fs/smb/client/reparse.c ++++ b/fs/smb/client/reparse.c +@@ -505,6 +505,10 @@ bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb, + } + + switch (tag) { ++ case IO_REPARSE_TAG_INTERNAL: ++ if (!(fattr->cf_cifsattrs & ATTR_DIRECTORY)) ++ return false; ++ fallthrough; + case IO_REPARSE_TAG_DFS: + case IO_REPARSE_TAG_DFSR: + case IO_REPARSE_TAG_MOUNT_POINT: +diff --git a/fs/smb/client/reparse.h b/fs/smb/client/reparse.h +index 6b55d1df9e2f8..2c0644bc4e65a 100644 +--- a/fs/smb/client/reparse.h ++++ b/fs/smb/client/reparse.h +@@ -12,6 +12,12 @@ + #include "fs_context.h" + #include "cifsglob.h" + ++/* ++ * Used only by cifs.ko to ignore reparse points from files when client or ++ * server doesn't support FSCTL_GET_REPARSE_POINT. ++ */ ++#define IO_REPARSE_TAG_INTERNAL ((__u32)~0U) ++ + static inline dev_t reparse_nfs_mkdev(struct reparse_posix_data *buf) + { + u64 v = le64_to_cpu(*(__le64 *)buf->DataBuffer); +@@ -78,10 +84,19 @@ static inline u32 reparse_mode_wsl_tag(mode_t mode) + static inline bool reparse_inode_match(struct inode *inode, + struct cifs_fattr *fattr) + { ++ struct cifsInodeInfo *cinode = CIFS_I(inode); + struct timespec64 ctime = inode_get_ctime(inode); + +- return (CIFS_I(inode)->cifsAttrs & ATTR_REPARSE) && +- CIFS_I(inode)->reparse_tag == fattr->cf_cifstag && ++ /* ++ * Do not match reparse tags when client or server doesn't support ++ * FSCTL_GET_REPARSE_POINT. @fattr->cf_cifstag should contain correct ++ * reparse tag from query dir response but the client won't be able to ++ * read the reparse point data anyway. This spares us a revalidation. ++ */ ++ if (cinode->reparse_tag != IO_REPARSE_TAG_INTERNAL && ++ cinode->reparse_tag != fattr->cf_cifstag) ++ return false; ++ return (cinode->cifsAttrs & ATTR_REPARSE) && + timespec64_equal(&ctime, &fattr->cf_ctime); + } + +diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c +index 86f8c81791374..28031c7ba6b19 100644 +--- a/fs/smb/client/smb2inode.c ++++ b/fs/smb/client/smb2inode.c +@@ -930,6 +930,8 @@ int smb2_query_path_info(const unsigned int xid, + + switch (rc) { + case 0: ++ rc = parse_create_response(data, cifs_sb, &out_iov[0]); ++ break; + case -EOPNOTSUPP: + /* + * BB TODO: When support for special files added to Samba +diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c +index a5efce03cb58e..61df8a5c68242 100644 +--- a/fs/smb/client/smb2pdu.c ++++ b/fs/smb/client/smb2pdu.c +@@ -80,6 +80,9 @@ int smb3_encryption_required(const struct cifs_tcon *tcon) + if (tcon->seal && + (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)) + return 1; ++ if (((global_secflags & CIFSSEC_MUST_SEAL) == CIFSSEC_MUST_SEAL) && ++ (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)) ++ return 1; + return 0; + } + +diff --git a/fs/tracefs/event_inode.c b/fs/tracefs/event_inode.c +index b406bb3430f3d..aa54be1ce1242 100644 +--- a/fs/tracefs/event_inode.c ++++ b/fs/tracefs/event_inode.c +@@ -113,7 +113,7 @@ static void release_ei(struct kref *ref) + entry->release(entry->name, ei->data); + } + +- call_rcu(&ei->rcu, free_ei_rcu); ++ call_srcu(&eventfs_srcu, &ei->rcu, free_ei_rcu); + } + + static inline void put_ei(struct eventfs_inode *ei) +@@ -806,7 +806,7 @@ struct eventfs_inode *eventfs_create_dir(const char *name, struct eventfs_inode + /* Was the parent freed? 
*/ + if (list_empty(&ei->list)) { + cleanup_ei(ei); +- ei = NULL; ++ ei = ERR_PTR(-EBUSY); + } + return ei; + } +diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c +index 4ea11d1f72ace..7d389dd5ed519 100644 +--- a/fs/tracefs/inode.c ++++ b/fs/tracefs/inode.c +@@ -42,7 +42,7 @@ static struct inode *tracefs_alloc_inode(struct super_block *sb) + struct tracefs_inode *ti; + unsigned long flags; + +- ti = kmem_cache_alloc(tracefs_inode_cachep, GFP_KERNEL); ++ ti = alloc_inode_sb(sb, tracefs_inode_cachep, GFP_KERNEL); + if (!ti) + return NULL; + +@@ -53,15 +53,14 @@ static struct inode *tracefs_alloc_inode(struct super_block *sb) + return &ti->vfs_inode; + } + +-static void tracefs_free_inode_rcu(struct rcu_head *rcu) ++static void tracefs_free_inode(struct inode *inode) + { +- struct tracefs_inode *ti; ++ struct tracefs_inode *ti = get_tracefs(inode); + +- ti = container_of(rcu, struct tracefs_inode, rcu); + kmem_cache_free(tracefs_inode_cachep, ti); + } + +-static void tracefs_free_inode(struct inode *inode) ++static void tracefs_destroy_inode(struct inode *inode) + { + struct tracefs_inode *ti = get_tracefs(inode); + unsigned long flags; +@@ -69,8 +68,6 @@ static void tracefs_free_inode(struct inode *inode) + spin_lock_irqsave(&tracefs_inode_lock, flags); + list_del_rcu(&ti->list); + spin_unlock_irqrestore(&tracefs_inode_lock, flags); +- +- call_rcu(&ti->rcu, tracefs_free_inode_rcu); + } + + static ssize_t default_read_file(struct file *file, char __user *buf, +@@ -458,6 +455,7 @@ static int tracefs_drop_inode(struct inode *inode) + static const struct super_operations tracefs_super_operations = { + .alloc_inode = tracefs_alloc_inode, + .free_inode = tracefs_free_inode, ++ .destroy_inode = tracefs_destroy_inode, + .drop_inode = tracefs_drop_inode, + .statfs = simple_statfs, + .remount_fs = tracefs_remount, +diff --git a/fs/tracefs/internal.h b/fs/tracefs/internal.h +index f704d8348357e..d83c2a25f288e 100644 +--- a/fs/tracefs/internal.h ++++ b/fs/tracefs/internal.h +@@ -10,10 +10,7 @@ enum { + }; + + struct tracefs_inode { +- union { +- struct inode vfs_inode; +- struct rcu_head rcu; +- }; ++ struct inode vfs_inode; + /* The below gets initialized with memset_after(ti, 0, vfs_inode) */ + struct list_head list; + unsigned long flags; +diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c +index 558ad046972ad..bb471ec364046 100644 +--- a/fs/udf/balloc.c ++++ b/fs/udf/balloc.c +@@ -18,6 +18,7 @@ + #include "udfdecl.h" + + #include ++#include + + #include "udf_i.h" + #include "udf_sb.h" +@@ -140,7 +141,6 @@ static void udf_bitmap_free_blocks(struct super_block *sb, + { + struct udf_sb_info *sbi = UDF_SB(sb); + struct buffer_head *bh = NULL; +- struct udf_part_map *partmap; + unsigned long block; + unsigned long block_group; + unsigned long bit; +@@ -149,19 +149,9 @@ static void udf_bitmap_free_blocks(struct super_block *sb, + unsigned long overflow; + + mutex_lock(&sbi->s_alloc_mutex); +- partmap = &sbi->s_partmaps[bloc->partitionReferenceNum]; +- if (bloc->logicalBlockNum + count < count || +- (bloc->logicalBlockNum + count) > partmap->s_partition_len) { +- udf_debug("%u < %d || %u + %u > %u\n", +- bloc->logicalBlockNum, 0, +- bloc->logicalBlockNum, count, +- partmap->s_partition_len); +- goto error_return; +- } +- ++ /* We make sure this cannot overflow when mounting the filesystem */ + block = bloc->logicalBlockNum + offset + + (sizeof(struct spaceBitmapDesc) << 3); +- + do { + overflow = 0; + block_group = block >> (sb->s_blocksize_bits + 3); +@@ -391,7 +381,6 @@ static void 
udf_table_free_blocks(struct super_block *sb, + uint32_t count) + { + struct udf_sb_info *sbi = UDF_SB(sb); +- struct udf_part_map *partmap; + uint32_t start, end; + uint32_t elen; + struct kernel_lb_addr eloc; +@@ -400,16 +389,6 @@ static void udf_table_free_blocks(struct super_block *sb, + struct udf_inode_info *iinfo; + + mutex_lock(&sbi->s_alloc_mutex); +- partmap = &sbi->s_partmaps[bloc->partitionReferenceNum]; +- if (bloc->logicalBlockNum + count < count || +- (bloc->logicalBlockNum + count) > partmap->s_partition_len) { +- udf_debug("%u < %d || %u + %u > %u\n", +- bloc->logicalBlockNum, 0, +- bloc->logicalBlockNum, count, +- partmap->s_partition_len); +- goto error_return; +- } +- + iinfo = UDF_I(table); + udf_add_free_space(sb, sbi->s_partition, count); + +@@ -684,6 +663,17 @@ void udf_free_blocks(struct super_block *sb, struct inode *inode, + { + uint16_t partition = bloc->partitionReferenceNum; + struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition]; ++ uint32_t blk; ++ ++ if (check_add_overflow(bloc->logicalBlockNum, offset, &blk) || ++ check_add_overflow(blk, count, &blk) || ++ bloc->logicalBlockNum + count > map->s_partition_len) { ++ udf_debug("Invalid request to free blocks: (%d, %u), off %u, " ++ "len %u, partition len %u\n", ++ partition, bloc->logicalBlockNum, offset, count, ++ map->s_partition_len); ++ return; ++ } + + if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) { + udf_bitmap_free_blocks(sb, map->s_uspace.s_bitmap, +diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c +index 57f366c3d3554..9f9d3abad2cf3 100644 +--- a/fs/xfs/xfs_log_recover.c ++++ b/fs/xfs/xfs_log_recover.c +@@ -2965,7 +2965,7 @@ xlog_do_recovery_pass( + int error = 0, h_size, h_len; + int error2 = 0; + int bblks, split_bblks; +- int hblks, split_hblks, wrapped_hblks; ++ int hblks = 1, split_hblks, wrapped_hblks; + int i; + struct hlist_head rhash[XLOG_RHASH_SIZE]; + LIST_HEAD (buffer_list); +@@ -3021,14 +3021,22 @@ xlog_do_recovery_pass( + if (error) + goto bread_err1; + +- hblks = xlog_logrec_hblks(log, rhead); +- if (hblks != 1) { +- kmem_free(hbp); +- hbp = xlog_alloc_buffer(log, hblks); ++ /* ++ * This open codes xlog_logrec_hblks so that we can reuse the ++ * fixed up h_size value calculated above. Without that we'd ++ * still allocate the buffer based on the incorrect on-disk ++ * size. ++ */ ++ if (h_size > XLOG_HEADER_CYCLE_SIZE && ++ (rhead->h_version & cpu_to_be32(XLOG_VERSION_2))) { ++ hblks = DIV_ROUND_UP(h_size, XLOG_HEADER_CYCLE_SIZE); ++ if (hblks > 1) { ++ kmem_free(hbp); ++ hbp = xlog_alloc_buffer(log, hblks); ++ } + } + } else { + ASSERT(log->l_sectBBsize == 1); +- hblks = 1; + hbp = xlog_alloc_buffer(log, 1); + h_size = XLOG_BIG_RECORD_BSIZE; + } +diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h +index 378b2459efe2d..f7cc8080672cc 100644 +--- a/include/linux/blk-integrity.h ++++ b/include/linux/blk-integrity.h +@@ -105,14 +105,13 @@ static inline bool blk_integrity_rq(struct request *rq) + } + + /* +- * Return the first bvec that contains integrity data. Only drivers that are +- * limited to a single integrity segment should use this helper. ++ * Return the current bvec that contains the integrity data. bip_iter may be ++ * advanced to iterate over the integrity data. 
+ */ +-static inline struct bio_vec *rq_integrity_vec(struct request *rq) ++static inline struct bio_vec rq_integrity_vec(struct request *rq) + { +- if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1)) +- return NULL; +- return rq->bio->bi_integrity->bip_vec; ++ return mp_bvec_iter_bvec(rq->bio->bi_integrity->bip_vec, ++ rq->bio->bi_integrity->bip_iter); + } + #else /* CONFIG_BLK_DEV_INTEGRITY */ + static inline int blk_rq_count_integrity_sg(struct request_queue *q, +@@ -176,9 +175,10 @@ static inline int blk_integrity_rq(struct request *rq) + return 0; + } + +-static inline struct bio_vec *rq_integrity_vec(struct request *rq) ++static inline struct bio_vec rq_integrity_vec(struct request *rq) + { +- return NULL; ++ /* the optimizer will remove all calls to this function */ ++ return (struct bio_vec){ }; + } + #endif /* CONFIG_BLK_DEV_INTEGRITY */ + #endif /* _LINUX_BLK_INTEGRITY_H */ +diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h +index 1d42d4b173271..0ad8b550bb4b4 100644 +--- a/include/linux/clocksource.h ++++ b/include/linux/clocksource.h +@@ -291,7 +291,19 @@ static inline void timer_probe(void) {} + #define TIMER_ACPI_DECLARE(name, table_id, fn) \ + ACPI_DECLARE_PROBE_ENTRY(timer, name, table_id, 0, NULL, 0, fn) + +-extern ulong max_cswd_read_retries; ++static inline unsigned int clocksource_get_max_watchdog_retry(void) ++{ ++ /* ++ * When system is in the boot phase or under heavy workload, there ++ * can be random big latencies during the clocksource/watchdog ++ * read, so allow retries to filter the noise latency. As the ++ * latency's frequency and maximum value goes up with the number of ++ * CPUs, scale the number of retries with the number of online ++ * CPUs. ++ */ ++ return (ilog2(num_online_cpus()) / 2) + 1; ++} ++ + void clocksource_verify_percpu(struct clocksource *cs); + + #endif /* _LINUX_CLOCKSOURCE_H */ +diff --git a/include/linux/fs.h b/include/linux/fs.h +index ee5efad0d7801..56dce38c47862 100644 +--- a/include/linux/fs.h ++++ b/include/linux/fs.h +@@ -642,6 +642,7 @@ struct inode { + umode_t i_mode; + unsigned short i_opflags; + kuid_t i_uid; ++ struct list_head i_lru; /* inode LRU list */ + kgid_t i_gid; + unsigned int i_flags; + +@@ -703,7 +704,6 @@ struct inode { + u16 i_wb_frn_avg_time; + u16 i_wb_frn_history; + #endif +- struct list_head i_lru; /* inode LRU list */ + struct list_head i_sb_list; + struct list_head i_wb_list; /* backing dev writeback list */ + union { +diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h +index 0a85ff5c8db3c..abff4e3b6a58b 100644 +--- a/include/linux/pci_ids.h ++++ b/include/linux/pci_ids.h +@@ -2124,6 +2124,8 @@ + + #define PCI_VENDOR_ID_CHELSIO 0x1425 + ++#define PCI_VENDOR_ID_EDIMAX 0x1432 ++ + #define PCI_VENDOR_ID_ADLINK 0x144a + + #define PCI_VENDOR_ID_SAMSUNG 0x144d +diff --git a/include/linux/profile.h b/include/linux/profile.h +index 11db1ec516e27..12da750a88a04 100644 +--- a/include/linux/profile.h ++++ b/include/linux/profile.h +@@ -11,7 +11,6 @@ + + #define CPU_PROFILING 1 + #define SCHED_PROFILING 2 +-#define SLEEP_PROFILING 3 + #define KVM_PROFILING 4 + + struct proc_dir_entry; +diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h +index 696f8dc4aa53c..cb8bd759e8005 100644 +--- a/include/linux/trace_events.h ++++ b/include/linux/trace_events.h +@@ -869,7 +869,6 @@ do { \ + struct perf_event; + + DECLARE_PER_CPU(struct pt_regs, perf_trace_regs); +-DECLARE_PER_CPU(int, bpf_kprobe_override); + + extern int perf_trace_init(struct perf_event *event); + extern 
void perf_trace_destroy(struct perf_event *event); +diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h +index d1d7825318c32..6c395a2600e8d 100644 +--- a/include/linux/virtio_net.h ++++ b/include/linux/virtio_net.h +@@ -56,7 +56,6 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, + unsigned int thlen = 0; + unsigned int p_off = 0; + unsigned int ip_proto; +- u64 ret, remainder, gso_size; + + if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { + switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { +@@ -99,16 +98,6 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, + u32 off = __virtio16_to_cpu(little_endian, hdr->csum_offset); + u32 needed = start + max_t(u32, thlen, off + sizeof(__sum16)); + +- if (hdr->gso_size) { +- gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size); +- ret = div64_u64_rem(skb->len, gso_size, &remainder); +- if (!(ret && (hdr->gso_size > needed) && +- ((remainder > needed) || (remainder == 0)))) { +- return -EINVAL; +- } +- skb_shinfo(skb)->tx_flags |= SKBFL_SHARED_FRAG; +- } +- + if (!pskb_may_pull(skb, needed)) + return -EINVAL; + +@@ -182,6 +171,11 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, + if (gso_type != SKB_GSO_UDP_L4) + return -EINVAL; + break; ++ case SKB_GSO_TCPV4: ++ case SKB_GSO_TCPV6: ++ if (skb->csum_offset != offsetof(struct tcphdr, check)) ++ return -EINVAL; ++ break; + } + + /* Kernel has a special handling for GSO_BY_FRAGS. */ +diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h +index b32539bb0fb05..61cfc8891f820 100644 +--- a/include/net/ip6_route.h ++++ b/include/net/ip6_route.h +@@ -128,18 +128,26 @@ void rt6_age_exceptions(struct fib6_info *f6i, struct fib6_gc_args *gc_args, + + static inline int ip6_route_get_saddr(struct net *net, struct fib6_info *f6i, + const struct in6_addr *daddr, +- unsigned int prefs, ++ unsigned int prefs, int l3mdev_index, + struct in6_addr *saddr) + { ++ struct net_device *l3mdev; ++ struct net_device *dev; ++ bool same_vrf; + int err = 0; + +- if (f6i && f6i->fib6_prefsrc.plen) { ++ rcu_read_lock(); ++ ++ l3mdev = dev_get_by_index_rcu(net, l3mdev_index); ++ if (!f6i || !f6i->fib6_prefsrc.plen || l3mdev) ++ dev = f6i ? fib6_info_nh_dev(f6i) : NULL; ++ same_vrf = !l3mdev || l3mdev_master_dev_rcu(dev) == l3mdev; ++ if (f6i && f6i->fib6_prefsrc.plen && same_vrf) + *saddr = f6i->fib6_prefsrc.addr; +- } else { +- struct net_device *dev = f6i ? fib6_info_nh_dev(f6i) : NULL; ++ else ++ err = ipv6_dev_get_saddr(net, same_vrf ? 
dev : l3mdev, daddr, prefs, saddr); + +- err = ipv6_dev_get_saddr(net, dev, daddr, prefs, saddr); +- } ++ rcu_read_unlock(); + + return err; + } +diff --git a/include/trace/events/intel_ifs.h b/include/trace/events/intel_ifs.h +index d7353024016cc..af0af3f1d9b7c 100644 +--- a/include/trace/events/intel_ifs.h ++++ b/include/trace/events/intel_ifs.h +@@ -10,25 +10,25 @@ + + TRACE_EVENT(ifs_status, + +- TP_PROTO(int cpu, union ifs_scan activate, union ifs_status status), ++ TP_PROTO(int cpu, int start, int stop, u64 status), + +- TP_ARGS(cpu, activate, status), ++ TP_ARGS(cpu, start, stop, status), + + TP_STRUCT__entry( + __field( u64, status ) + __field( int, cpu ) +- __field( u8, start ) +- __field( u8, stop ) ++ __field( u16, start ) ++ __field( u16, stop ) + ), + + TP_fast_assign( + __entry->cpu = cpu; +- __entry->start = activate.start; +- __entry->stop = activate.stop; +- __entry->status = status.data; ++ __entry->start = start; ++ __entry->stop = stop; ++ __entry->status = status; + ), + +- TP_printk("cpu: %d, start: %.2x, stop: %.2x, status: %llx", ++ TP_printk("cpu: %d, start: %.4x, stop: %.4x, status: %.16llx", + __entry->cpu, + __entry->start, + __entry->stop, +diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c +index 5c9888901ef44..46094f0c9fcda 100644 +--- a/kernel/irq/irqdesc.c ++++ b/kernel/irq/irqdesc.c +@@ -517,6 +517,7 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node, + flags = IRQD_AFFINITY_MANAGED | + IRQD_MANAGED_SHUTDOWN; + } ++ flags |= IRQD_AFFINITY_SET; + mask = &affinity->mask; + node = cpu_to_node(cpumask_first(mask)); + affinity++; +diff --git a/kernel/jump_label.c b/kernel/jump_label.c +index eec802175ccc6..1ed269b2c4035 100644 +--- a/kernel/jump_label.c ++++ b/kernel/jump_label.c +@@ -231,7 +231,7 @@ void static_key_disable_cpuslocked(struct static_key *key) + } + + jump_label_lock(); +- if (atomic_cmpxchg(&key->enabled, 1, 0)) ++ if (atomic_cmpxchg(&key->enabled, 1, 0) == 1) + jump_label_update(key); + jump_label_unlock(); + } +@@ -284,7 +284,7 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key) + return; + + guard(mutex)(&jump_label_mutex); +- if (atomic_cmpxchg(&key->enabled, 1, 0)) ++ if (atomic_cmpxchg(&key->enabled, 1, 0) == 1) + jump_label_update(key); + else + WARN_ON_ONCE(!static_key_slow_try_dec(key)); +diff --git a/kernel/kcov.c b/kernel/kcov.c +index 9f4affae4fad4..72d9aa6fb50c3 100644 +--- a/kernel/kcov.c ++++ b/kernel/kcov.c +@@ -161,6 +161,15 @@ static void kcov_remote_area_put(struct kcov_remote_area *area, + kmsan_unpoison_memory(&area->list, sizeof(area->list)); + } + ++/* ++ * Unlike in_serving_softirq(), this function returns false when called during ++ * a hardirq or an NMI that happened in the softirq context. ++ */ ++static inline bool in_softirq_really(void) ++{ ++ return in_serving_softirq() && !in_hardirq() && !in_nmi(); ++} ++ + static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t) + { + unsigned int mode; +@@ -170,7 +179,7 @@ static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_stru + * so we ignore code executed in interrupts, unless we are in a remote + * coverage collection section in a softirq. 
+ */ +- if (!in_task() && !(in_serving_softirq() && t->kcov_softirq)) ++ if (!in_task() && !(in_softirq_really() && t->kcov_softirq)) + return false; + mode = READ_ONCE(t->kcov_mode); + /* +@@ -848,7 +857,7 @@ void kcov_remote_start(u64 handle) + + if (WARN_ON(!kcov_check_handle(handle, true, true, true))) + return; +- if (!in_task() && !in_serving_softirq()) ++ if (!in_task() && !in_softirq_really()) + return; + + local_lock_irqsave(&kcov_percpu_data.lock, flags); +@@ -990,7 +999,7 @@ void kcov_remote_stop(void) + int sequence; + unsigned long flags; + +- if (!in_task() && !in_serving_softirq()) ++ if (!in_task() && !in_softirq_really()) + return; + + local_lock_irqsave(&kcov_percpu_data.lock, flags); +diff --git a/kernel/kprobes.c b/kernel/kprobes.c +index add63428c0b40..c10954bd84448 100644 +--- a/kernel/kprobes.c ++++ b/kernel/kprobes.c +@@ -1558,8 +1558,8 @@ static bool is_cfi_preamble_symbol(unsigned long addr) + if (lookup_symbol_name(addr, symbuf)) + return false; + +- return str_has_prefix("__cfi_", symbuf) || +- str_has_prefix("__pfx_", symbuf); ++ return str_has_prefix(symbuf, "__cfi_") || ++ str_has_prefix(symbuf, "__pfx_"); + } + + static int check_kprobe_address_safe(struct kprobe *p, +diff --git a/kernel/module/main.c b/kernel/module/main.c +index 34d9e718c2c7d..b00e31721a73e 100644 +--- a/kernel/module/main.c ++++ b/kernel/module/main.c +@@ -3081,7 +3081,7 @@ static bool idempotent(struct idempotent *u, const void *cookie) + struct idempotent *existing; + bool first; + +- u->ret = 0; ++ u->ret = -EINTR; + u->cookie = cookie; + init_completion(&u->complete); + +@@ -3117,7 +3117,7 @@ static int idempotent_complete(struct idempotent *u, int ret) + hlist_for_each_entry_safe(pos, next, head, entry) { + if (pos->cookie != cookie) + continue; +- hlist_del(&pos->entry); ++ hlist_del_init(&pos->entry); + pos->ret = ret; + complete(&pos->complete); + } +@@ -3125,6 +3125,28 @@ static int idempotent_complete(struct idempotent *u, int ret) + return ret; + } + ++/* ++ * Wait for the idempotent worker. ++ * ++ * If we get interrupted, we need to remove ourselves from the ++ * idempotent list, and the completion may still come in. ++ * ++ * The 'idem_lock' protects against the race, and 'idem.ret' was ++ * initialized to -EINTR and is thus always the right return ++ * value even if the idempotent work then completes between ++ * the wait_for_completion and the cleanup. ++ */ ++static int idempotent_wait_for_completion(struct idempotent *u) ++{ ++ if (wait_for_completion_interruptible(&u->complete)) { ++ spin_lock(&idem_lock); ++ if (!hlist_unhashed(&u->entry)) ++ hlist_del(&u->entry); ++ spin_unlock(&idem_lock); ++ } ++ return u->ret; ++} ++ + static int init_module_from_file(struct file *f, const char __user * uargs, int flags) + { + struct load_info info = { }; +@@ -3160,15 +3182,16 @@ static int idempotent_init_module(struct file *f, const char __user * uargs, int + if (!f || !(f->f_mode & FMODE_READ)) + return -EBADF; + +- /* See if somebody else is doing the operation? */ +- if (idempotent(&idem, file_inode(f))) { +- wait_for_completion(&idem.complete); +- return idem.ret; ++ /* Are we the winners of the race and get to do this? */ ++ if (!idempotent(&idem, file_inode(f))) { ++ int ret = init_module_from_file(f, uargs, flags); ++ return idempotent_complete(&idem, ret); + } + +- /* Otherwise, we'll do it and complete others */ +- return idempotent_complete(&idem, +- init_module_from_file(f, uargs, flags)); ++ /* ++ * Somebody else won the race and is loading the module.
++ */ ++ return idempotent_wait_for_completion(&idem); } SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags) +diff --git a/kernel/padata.c b/kernel/padata.c +index c974568f65f5d..29545dd6dd53d 100644 +--- a/kernel/padata.c ++++ b/kernel/padata.c +@@ -516,6 +516,13 @@ void __init padata_do_multithreaded(struct padata_mt_job *job) + ps.chunk_size = max(ps.chunk_size, job->min_chunk); + ps.chunk_size = roundup(ps.chunk_size, job->align); + ++ /* ++ * chunk_size can be 0 if the caller sets min_chunk to 0. So force it ++ * to at least 1 to prevent divide-by-0 panic in padata_mt_helper(). ++ */ ++ if (!ps.chunk_size) ++ ps.chunk_size = 1U; ++ + list_for_each_entry(pw, &works, pw_list) + queue_work(system_unbound_wq, &pw->pw_work); + +diff --git a/kernel/profile.c b/kernel/profile.c +index 8a77769bc4b4c..984f819b701c9 100644 +--- a/kernel/profile.c ++++ b/kernel/profile.c +@@ -57,20 +57,11 @@ static DEFINE_MUTEX(profile_flip_mutex); + int profile_setup(char *str) + { + static const char schedstr[] = "schedule"; +- static const char sleepstr[] = "sleep"; + static const char kvmstr[] = "kvm"; + const char *select = NULL; + int par; + +- if (!strncmp(str, sleepstr, strlen(sleepstr))) { +-#ifdef CONFIG_SCHEDSTATS +- force_schedstat_enabled(); +- prof_on = SLEEP_PROFILING; +- select = sleepstr; +-#else +- pr_warn("kernel sleep profiling requires CONFIG_SCHEDSTATS\n"); +-#endif /* CONFIG_SCHEDSTATS */ +- } else if (!strncmp(str, schedstr, strlen(schedstr))) { ++ if (!strncmp(str, schedstr, strlen(schedstr))) { + prof_on = SCHED_PROFILING; + select = schedstr; + } else if (!strncmp(str, kvmstr, strlen(kvmstr))) { +diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c +index 781146600aa49..46612fb15fc6d 100644 +--- a/kernel/rcu/rcutorture.c ++++ b/kernel/rcu/rcutorture.c +@@ -2592,7 +2592,7 @@ static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp) + spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); + rfcpp = rfp->rcu_fwd_cb_tail; + rfp->rcu_fwd_cb_tail = &rfcp->rfc_next; +- WRITE_ONCE(*rfcpp, rfcp); ++ smp_store_release(rfcpp, rfcp); + WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1); + i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV)); + if (i >= ARRAY_SIZE(rfp->n_launders_hist)) +diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c +index 8cf6a6fef7965..583cc29080764 100644 +--- a/kernel/rcu/tree.c ++++ b/kernel/rcu/tree.c +@@ -4595,11 +4595,15 @@ void rcutree_migrate_callbacks(int cpu) + struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); + bool needwake; + +- if (rcu_rdp_is_offloaded(rdp) || +- rcu_segcblist_empty(&rdp->cblist)) +- return; /* No callbacks to migrate. */ ++ if (rcu_rdp_is_offloaded(rdp)) ++ return; + + raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags); ++ if (rcu_segcblist_empty(&rdp->cblist)) { ++ raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags); ++ return; /* No callbacks to migrate.
*/ ++ } ++ + WARN_ON_ONCE(rcu_rdp_cpu_online(rdp)); + rcu_barrier_entrain(rdp); + my_rdp = this_cpu_ptr(&rcu_data); +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index 92e4afeb71add..97571d390f184 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -9596,6 +9596,30 @@ void set_rq_offline(struct rq *rq) + } + } + ++static inline void sched_set_rq_online(struct rq *rq, int cpu) ++{ ++ struct rq_flags rf; ++ ++ rq_lock_irqsave(rq, &rf); ++ if (rq->rd) { ++ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); ++ set_rq_online(rq); ++ } ++ rq_unlock_irqrestore(rq, &rf); ++} ++ ++static inline void sched_set_rq_offline(struct rq *rq, int cpu) ++{ ++ struct rq_flags rf; ++ ++ rq_lock_irqsave(rq, &rf); ++ if (rq->rd) { ++ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); ++ set_rq_offline(rq); ++ } ++ rq_unlock_irqrestore(rq, &rf); ++} ++ + /* + * used to mark begin/end of suspend/resume: + */ +@@ -9646,10 +9670,25 @@ static int cpuset_cpu_inactive(unsigned int cpu) + return 0; + } + ++static inline void sched_smt_present_inc(int cpu) ++{ ++#ifdef CONFIG_SCHED_SMT ++ if (cpumask_weight(cpu_smt_mask(cpu)) == 2) ++ static_branch_inc_cpuslocked(&sched_smt_present); ++#endif ++} ++ ++static inline void sched_smt_present_dec(int cpu) ++{ ++#ifdef CONFIG_SCHED_SMT ++ if (cpumask_weight(cpu_smt_mask(cpu)) == 2) ++ static_branch_dec_cpuslocked(&sched_smt_present); ++#endif ++} ++ + int sched_cpu_activate(unsigned int cpu) + { + struct rq *rq = cpu_rq(cpu); +- struct rq_flags rf; + + /* + * Clear the balance_push callback and prepare to schedule +@@ -9657,13 +9696,10 @@ int sched_cpu_activate(unsigned int cpu) + */ + balance_push_set(cpu, false); + +-#ifdef CONFIG_SCHED_SMT + /* + * When going up, increment the number of cores with SMT present. + */ +- if (cpumask_weight(cpu_smt_mask(cpu)) == 2) +- static_branch_inc_cpuslocked(&sched_smt_present); +-#endif ++ sched_smt_present_inc(cpu); + set_cpu_active(cpu, true); + + if (sched_smp_initialized) { +@@ -9681,12 +9717,7 @@ int sched_cpu_activate(unsigned int cpu) + * 2) At runtime, if cpuset_cpu_active() fails to rebuild the + * domains. + */ +- rq_lock_irqsave(rq, &rf); +- if (rq->rd) { +- BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); +- set_rq_online(rq); +- } +- rq_unlock_irqrestore(rq, &rf); ++ sched_set_rq_online(rq, cpu); + + return 0; + } +@@ -9694,7 +9725,6 @@ int sched_cpu_activate(unsigned int cpu) + int sched_cpu_deactivate(unsigned int cpu) + { + struct rq *rq = cpu_rq(cpu); +- struct rq_flags rf; + int ret; + + /* +@@ -9725,20 +9755,14 @@ int sched_cpu_deactivate(unsigned int cpu) + */ + synchronize_rcu(); + +- rq_lock_irqsave(rq, &rf); +- if (rq->rd) { +- BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); +- set_rq_offline(rq); +- } +- rq_unlock_irqrestore(rq, &rf); ++ sched_set_rq_offline(rq, cpu); + +-#ifdef CONFIG_SCHED_SMT + /* + * When going down, decrement the number of cores with SMT present. 
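[Editor's sketch] Factoring the SMT accounting into inc/dec helpers is what lets the error path at the end of this hunk roll back cleanly: every step taken before the fallible cpuset call gains a mirrored undo. A toy version of that shape (plain counters standing in for the static branch; all names invented):

    #include <stdio.h>

    static int smt_cores;                     /* stand-in for sched_smt_present */

    static void smt_inc(int siblings) { if (siblings == 2) smt_cores++; }
    static void smt_dec(int siblings) { if (siblings == 2) smt_cores--; }

    static int cpuset_inactive_fails(void) { return -16; /* pretend -EBUSY */ }

    static int deactivate(int siblings, int (*cpuset_inactive)(void))
    {
        int ret;

        smt_dec(siblings);                    /* step 1 */
        ret = cpuset_inactive();              /* may fail */
        if (ret) {
            smt_inc(siblings);                /* undo step 1, as the patch adds */
            return ret;
        }
        return 0;
    }

    int main(void)
    {
        int ret = deactivate(2, cpuset_inactive_fails);
        printf("ret=%d smt_cores=%d\n", ret, smt_cores);  /* ret=-16 smt_cores=0 */
        return 0;
    }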
+ */ +- if (cpumask_weight(cpu_smt_mask(cpu)) == 2) +- static_branch_dec_cpuslocked(&sched_smt_present); ++ sched_smt_present_dec(cpu); + ++#ifdef CONFIG_SCHED_SMT + sched_core_cpu_deactivate(cpu); + #endif + +@@ -9748,6 +9772,8 @@ int sched_cpu_deactivate(unsigned int cpu) + sched_update_numa(cpu, false); + ret = cpuset_cpu_inactive(cpu); + if (ret) { ++ sched_smt_present_inc(cpu); ++ sched_set_rq_online(rq, cpu); + balance_push_set(cpu, false); + set_cpu_active(cpu, true); + sched_update_numa(cpu, true); +diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c +index af7952f12e6cf..b453f8a6a7c76 100644 +--- a/kernel/sched/cputime.c ++++ b/kernel/sched/cputime.c +@@ -595,6 +595,12 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev, + } + + stime = mul_u64_u64_div_u64(stime, rtime, stime + utime); ++ /* ++ * Because mul_u64_u64_div_u64() can approximate on some ++ * achitectures; enforce the constraint that: a*b/(b+c) <= a. ++ */ ++ if (unlikely(stime > rtime)) ++ stime = rtime; + + update: + /* +diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c +index 857f837f52cbe..966f4eacfe51d 100644 +--- a/kernel/sched/stats.c ++++ b/kernel/sched/stats.c +@@ -92,16 +92,6 @@ void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p, + + trace_sched_stat_blocked(p, delta); + +- /* +- * Blocking time is in units of nanosecs, so shift by +- * 20 to get a milliseconds-range estimation of the +- * amount of time that the task spent sleeping: +- */ +- if (unlikely(prof_on == SLEEP_PROFILING)) { +- profile_hits(SLEEP_PROFILING, +- (void *)get_wchan(p), +- delta >> 20); +- } + account_scheduler_latency(p, delta >> 10, 0); + } + } +diff --git a/kernel/time/clocksource-wdtest.c b/kernel/time/clocksource-wdtest.c +index df922f49d171b..d06185e054ea2 100644 +--- a/kernel/time/clocksource-wdtest.c ++++ b/kernel/time/clocksource-wdtest.c +@@ -104,8 +104,8 @@ static void wdtest_ktime_clocksource_reset(void) + static int wdtest_func(void *arg) + { + unsigned long j1, j2; ++ int i, max_retries; + char *s; +- int i; + + schedule_timeout_uninterruptible(holdoff * HZ); + +@@ -139,18 +139,19 @@ static int wdtest_func(void *arg) + WARN_ON_ONCE(time_before(j2, j1 + NSEC_PER_USEC)); + + /* Verify tsc-like stability with various numbers of errors injected. 
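[Editor's sketch] A quick numeric check of the invariant the cputime_adjust() hunk above enforces: with exact 128-bit math, stime * rtime / (stime + utime) can never exceed rtime, so any excess can only come from an approximating mul_u64_u64_div_u64(), and clamping is safe and cheap. Hosted C, assuming a compiler with __uint128_t:

    #include <inttypes.h>
    #include <stdio.h>

    /* Exact a*b/c via 128-bit intermediate; lossy implementations may
     * return more than the true quotient, hence the clamp in the patch. */
    static uint64_t mul_div(uint64_t a, uint64_t b, uint64_t c)
    {
        return (uint64_t)(((__uint128_t)a * b) / c);
    }

    int main(void)
    {
        uint64_t stime = 7, utime = 3, rtime = 100;
        uint64_t s = mul_div(stime, rtime, stime + utime);

        if (s > rtime)      /* enforce stime*rtime/(stime+utime) <= rtime */
            s = rtime;
        printf("scaled stime = %" PRIu64 "\n", s);   /* prints 70 */
        return 0;
    }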
*/ +- for (i = 0; i <= max_cswd_read_retries + 1; i++) { +- if (i <= 1 && i < max_cswd_read_retries) ++ max_retries = clocksource_get_max_watchdog_retry(); ++ for (i = 0; i <= max_retries + 1; i++) { ++ if (i <= 1 && i < max_retries) + s = ""; +- else if (i <= max_cswd_read_retries) ++ else if (i <= max_retries) + s = ", expect message"; + else + s = ", expect clock skew"; +- pr_info("--- Watchdog with %dx error injection, %lu retries%s.\n", i, max_cswd_read_retries, s); ++ pr_info("--- Watchdog with %dx error injection, %d retries%s.\n", i, max_retries, s); + WRITE_ONCE(wdtest_ktime_read_ndelays, i); + schedule_timeout_uninterruptible(2 * HZ); + WARN_ON_ONCE(READ_ONCE(wdtest_ktime_read_ndelays)); +- WARN_ON_ONCE((i <= max_cswd_read_retries) != ++ WARN_ON_ONCE((i <= max_retries) != + !(clocksource_wdtest_ktime.flags & CLOCK_SOURCE_UNSTABLE)); + wdtest_ktime_clocksource_reset(); + } +diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c +index 3052b1f1168e2..3260bbe98894b 100644 +--- a/kernel/time/clocksource.c ++++ b/kernel/time/clocksource.c +@@ -210,9 +210,6 @@ void clocksource_mark_unstable(struct clocksource *cs) + spin_unlock_irqrestore(&watchdog_lock, flags); + } + +-ulong max_cswd_read_retries = 2; +-module_param(max_cswd_read_retries, ulong, 0644); +-EXPORT_SYMBOL_GPL(max_cswd_read_retries); + static int verify_n_cpus = 8; + module_param(verify_n_cpus, int, 0644); + +@@ -224,11 +221,12 @@ enum wd_read_status { + + static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow) + { +- unsigned int nretries; ++ unsigned int nretries, max_retries; + u64 wd_end, wd_end2, wd_delta; + int64_t wd_delay, wd_seq_delay; + +- for (nretries = 0; nretries <= max_cswd_read_retries; nretries++) { ++ max_retries = clocksource_get_max_watchdog_retry(); ++ for (nretries = 0; nretries <= max_retries; nretries++) { + local_irq_disable(); + *wdnow = watchdog->read(watchdog); + *csnow = cs->read(cs); +@@ -240,7 +238,7 @@ static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow, + wd_delay = clocksource_cyc2ns(wd_delta, watchdog->mult, + watchdog->shift); + if (wd_delay <= WATCHDOG_MAX_SKEW) { +- if (nretries > 1 || nretries >= max_cswd_read_retries) { ++ if (nretries > 1 && nretries >= max_retries) { + pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n", + smp_processor_id(), watchdog->name, nretries); + } +diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c +index 406dccb79c2b6..8d2dd214ec682 100644 +--- a/kernel/time/ntp.c ++++ b/kernel/time/ntp.c +@@ -727,17 +727,16 @@ static inline void process_adjtimex_modes(const struct __kernel_timex *txc, + } + + if (txc->modes & ADJ_MAXERROR) +- time_maxerror = txc->maxerror; ++ time_maxerror = clamp(txc->maxerror, 0, NTP_PHASE_LIMIT); + + if (txc->modes & ADJ_ESTERROR) +- time_esterror = txc->esterror; ++ time_esterror = clamp(txc->esterror, 0, NTP_PHASE_LIMIT); + + if (txc->modes & ADJ_TIMECONST) { +- time_constant = txc->constant; ++ time_constant = clamp(txc->constant, 0, MAXTC); + if (!(time_status & STA_NANO)) + time_constant += 4; +- time_constant = min(time_constant, (long)MAXTC); +- time_constant = max(time_constant, 0l); ++ time_constant = clamp(time_constant, 0, MAXTC); + } + + if (txc->modes & ADJ_TAI && +diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c +index b4843099a8da7..ed58eebb4e8f4 100644 +--- a/kernel/time/tick-broadcast.c ++++ b/kernel/time/tick-broadcast.c +@@ -1141,7 +1141,6 @@ void tick_broadcast_switch_to_oneshot(void) + 
#ifdef CONFIG_HOTPLUG_CPU + void hotplug_cpu__broadcast_tick_pull(int deadcpu) + { +- struct tick_device *td = this_cpu_ptr(&tick_cpu_device); + struct clock_event_device *bc; + unsigned long flags; + +@@ -1167,6 +1166,8 @@ void hotplug_cpu__broadcast_tick_pull(int deadcpu) + * device to avoid the starvation. + */ + if (tick_check_broadcast_expired()) { ++ struct tick_device *td = this_cpu_ptr(&tick_cpu_device); ++ + cpumask_clear_cpu(smp_processor_id(), tick_broadcast_force_mask); + tick_program_event(td->evtdev->next_event, 1); + } +diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c +index 8aab7ed414907..11b7000d5e1d4 100644 +--- a/kernel/time/timekeeping.c ++++ b/kernel/time/timekeeping.c +@@ -2476,7 +2476,7 @@ int do_adjtimex(struct __kernel_timex *txc) + clock_set |= timekeeping_advance(TK_ADV_FREQ); + + if (clock_set) +- clock_was_set(CLOCK_REALTIME); ++ clock_was_set(CLOCK_SET_WALL); + + ntp_notify_cmos_timer(); + +diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c +index a4dcf0f243521..3a56e7c8aa4f6 100644 +--- a/kernel/trace/tracing_map.c ++++ b/kernel/trace/tracing_map.c +@@ -454,7 +454,7 @@ static struct tracing_map_elt *get_free_elt(struct tracing_map *map) + struct tracing_map_elt *elt = NULL; + int idx; + +- idx = atomic_inc_return(&map->next_elt); ++ idx = atomic_fetch_add_unless(&map->next_elt, 1, map->max_elts); + if (idx < map->max_elts) { + elt = *(TRACING_MAP_ELT(map->elts, idx)); + if (map->ops && map->ops->elt_init) +@@ -699,7 +699,7 @@ void tracing_map_clear(struct tracing_map *map) + { + unsigned int i; + +- atomic_set(&map->next_elt, -1); ++ atomic_set(&map->next_elt, 0); + atomic64_set(&map->hits, 0); + atomic64_set(&map->drops, 0); + +@@ -783,7 +783,7 @@ struct tracing_map *tracing_map_create(unsigned int map_bits, + + map->map_bits = map_bits; + map->max_elts = (1 << map_bits); +- atomic_set(&map->next_elt, -1); ++ atomic_set(&map->next_elt, 0); + + map->map_size = (1 << (map_bits + 1)); + map->ops = ops; +diff --git a/mm/huge_memory.c b/mm/huge_memory.c +index 79fbd6ddec49f..7ac2877e76629 100644 +--- a/mm/huge_memory.c ++++ b/mm/huge_memory.c +@@ -37,6 +37,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -601,6 +602,9 @@ static unsigned long __thp_get_unmapped_area(struct file *filp, + loff_t off_align = round_up(off, size); + unsigned long len_pad, ret; + ++ if (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall()) ++ return 0; ++ + if (off_end <= off_align || (off_end - off_align) < size) + return 0; + +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index a480affd475bf..fb7a531fce717 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -1769,13 +1769,6 @@ static void __update_and_free_hugetlb_folio(struct hstate *h, + return; + } + +- /* +- * Move PageHWPoison flag from head page to the raw error pages, +- * which makes any healthy subpages reusable. +- */ +- if (unlikely(folio_test_hwpoison(folio))) +- folio_clear_hugetlb_hwpoison(folio); +- + /* + * If vmemmap pages were allocated above, then we need to clear the + * hugetlb destructor under the hugetlb lock. +@@ -1786,6 +1779,13 @@ static void __update_and_free_hugetlb_folio(struct hstate *h, + spin_unlock_irq(&hugetlb_lock); + } + ++ /* ++ * Move PageHWPoison flag from head page to the raw error pages, ++ * which makes any healthy subpages reusable. 
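[Editor's sketch] The tracing_map change a few hunks up swaps a bare atomic_inc_return() for atomic_fetch_add_unless() so the element cursor can never run past max_elts (and, eventually, wrap back into range); the counter also now starts at 0 instead of -1. The same semantics expressed with GCC builtin atomics, names invented:

    #include <stdio.h>

    static int next_elt;                  /* starts at 0, as in the patch */
    #define MAX_ELTS 4

    static int get_free_idx(void)
    {
        int cur = __atomic_load_n(&next_elt, __ATOMIC_RELAXED);

        /* add 1 unless the counter already reached MAX_ELTS */
        while (cur < MAX_ELTS &&
               !__atomic_compare_exchange_n(&next_elt, &cur, cur + 1, 0,
                                            __ATOMIC_RELAXED, __ATOMIC_RELAXED))
            ;                             /* cur is reloaded on CAS failure */
        return cur < MAX_ELTS ? cur : -1; /* -1: map is full */
    }

    int main(void)
    {
        for (int i = 0; i < 6; i++)
            printf("%d\n", get_free_idx());  /* 0 1 2 3 -1 -1 */
        return 0;
    }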
++ */ ++ if (unlikely(folio_test_hwpoison(folio))) ++ folio_clear_hugetlb_hwpoison(folio); ++ + /* + * Non-gigantic pages demoted from CMA allocated gigantic pages + * need to be given back to CMA in free_gigantic_folio. +diff --git a/mm/memcontrol.c b/mm/memcontrol.c +index dd854cc65fd9d..fd1b707f5de40 100644 +--- a/mm/memcontrol.c ++++ b/mm/memcontrol.c +@@ -5167,11 +5167,28 @@ static struct cftype mem_cgroup_legacy_files[] = { + + #define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1) + static DEFINE_IDR(mem_cgroup_idr); ++static DEFINE_SPINLOCK(memcg_idr_lock); ++ ++static int mem_cgroup_alloc_id(void) ++{ ++ int ret; ++ ++ idr_preload(GFP_KERNEL); ++ spin_lock(&memcg_idr_lock); ++ ret = idr_alloc(&mem_cgroup_idr, NULL, 1, MEM_CGROUP_ID_MAX + 1, ++ GFP_NOWAIT); ++ spin_unlock(&memcg_idr_lock); ++ idr_preload_end(); ++ return ret; ++} + + static void mem_cgroup_id_remove(struct mem_cgroup *memcg) + { + if (memcg->id.id > 0) { ++ spin_lock(&memcg_idr_lock); + idr_remove(&mem_cgroup_idr, memcg->id.id); ++ spin_unlock(&memcg_idr_lock); ++ + memcg->id.id = 0; + } + } +@@ -5294,8 +5311,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void) + if (!memcg) + return ERR_PTR(error); + +- memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, +- 1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL); ++ memcg->id.id = mem_cgroup_alloc_id(); + if (memcg->id.id < 0) { + error = memcg->id.id; + goto fail; +@@ -5430,7 +5446,9 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css) + * publish it here at the end of onlining. This matches the + * regular ID destruction during offlining. + */ ++ spin_lock(&memcg_idr_lock); + idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); ++ spin_unlock(&memcg_idr_lock); + + return 0; + offline_kmem: +diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c +index 6dab0c99c82c7..38fee34887d8a 100644 +--- a/net/bluetooth/hci_sync.c ++++ b/net/bluetooth/hci_sync.c +@@ -2905,6 +2905,20 @@ static int hci_passive_scan_sync(struct hci_dev *hdev) + } else if (hci_is_adv_monitoring(hdev)) { + window = hdev->le_scan_window_adv_monitor; + interval = hdev->le_scan_int_adv_monitor; ++ ++ /* Disable duplicates filter when scanning for advertisement ++ * monitor for the following reasons. ++ * ++ * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm ++ * controllers ignore RSSI_Sampling_Period when the duplicates ++ * filter is enabled. ++ * ++ * For SW pattern filtering, when we're not doing interleaved ++ * scanning, it is necessary to disable duplicates filter, ++ * otherwise hosts can only receive one advertisement and it's ++ * impossible to know if a peer is still in range. 
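[Editor's sketch] The memcontrol hunk above follows a standard recipe: all IDR mutations (alloc, remove, replace) now share one spinlock, and because a spinlocked section must not sleep, the sleeping part of allocation is hoisted out via idr_preload(GFP_KERNEL) followed by a GFP_NOWAIT idr_alloc(). A userspace analogue of the locking shape only, with a mutex and an array standing in for the spinlock and the IDR:

    #include <pthread.h>
    #include <stdio.h>

    #define MAX_ID 8
    static pthread_mutex_t idr_lock = PTHREAD_MUTEX_INITIALIZER;
    static void *table[MAX_ID];           /* toy stand-in for the IDR */

    static int alloc_id(void *ptr)
    {
        int id;

        /* any sleeping work (the idr_preload() part) happens out here */
        pthread_mutex_lock(&idr_lock);
        for (id = 1; id < MAX_ID; id++) { /* ids start at 1, as in the hunk */
            if (!table[id]) {
                table[id] = ptr;
                pthread_mutex_unlock(&idr_lock);
                return id;
            }
        }
        pthread_mutex_unlock(&idr_lock);
        return -1;
    }

    static void remove_id(int id)
    {
        pthread_mutex_lock(&idr_lock);    /* same lock as alloc/replace */
        table[id] = NULL;
        pthread_mutex_unlock(&idr_lock);
    }

    int main(void)
    {
        int a = alloc_id(table), b = alloc_id(table);
        remove_id(a);
        printf("%d %d %d\n", a, b, alloc_id(table));  /* 1 2 1 */
        return 0;
    }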
++ */ ++ filter_dups = LE_SCAN_FILTER_DUP_DISABLE; + } else { + window = hdev->le_scan_window; + interval = hdev->le_scan_interval; +diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c +index 1164c6d927281..2651cc2d5c283 100644 +--- a/net/bluetooth/l2cap_core.c ++++ b/net/bluetooth/l2cap_core.c +@@ -6775,6 +6775,7 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, + bt_cb(skb)->l2cap.psm = psm; + + if (!chan->ops->recv(chan, skb)) { ++ l2cap_chan_unlock(chan); + l2cap_chan_put(chan); + return; + } +diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c +index 38373b4fb7ddf..c38244d60ff86 100644 +--- a/net/bridge/br_multicast.c ++++ b/net/bridge/br_multicast.c +@@ -2044,16 +2044,14 @@ void br_multicast_del_port(struct net_bridge_port *port) + { + struct net_bridge *br = port->br; + struct net_bridge_port_group *pg; +- HLIST_HEAD(deleted_head); + struct hlist_node *n; + + /* Take care of the remaining groups, only perm ones should be left */ + spin_lock_bh(&br->multicast_lock); + hlist_for_each_entry_safe(pg, n, &port->mglist, mglist) + br_multicast_find_del_pg(br, pg); +- hlist_move_list(&br->mcast_gc_list, &deleted_head); + spin_unlock_bh(&br->multicast_lock); +- br_multicast_gc(&deleted_head); ++ flush_work(&br->mcast_gc_work); + br_multicast_port_ctx_deinit(&port->multicast_ctx); + free_percpu(port->mcast_stats); + } +diff --git a/net/core/link_watch.c b/net/core/link_watch.c +index cb43f5aebfbcc..cf867f6e38bf1 100644 +--- a/net/core/link_watch.c ++++ b/net/core/link_watch.c +@@ -153,9 +153,9 @@ static void linkwatch_schedule_work(int urgent) + * override the existing timer. + */ + if (test_bit(LW_URGENT, &linkwatch_flags)) +- mod_delayed_work(system_wq, &linkwatch_work, 0); ++ mod_delayed_work(system_unbound_wq, &linkwatch_work, 0); + else +- schedule_delayed_work(&linkwatch_work, delay); ++ queue_delayed_work(system_unbound_wq, &linkwatch_work, delay); + } + + +diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c +index 8311c38267b55..69e6012ae82fb 100644 +--- a/net/ipv4/tcp_offload.c ++++ b/net/ipv4/tcp_offload.c +@@ -73,6 +73,9 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb, + if (thlen < sizeof(*th)) + goto out; + ++ if (unlikely(skb_checksum_start(skb) != skb_transport_header(skb))) ++ goto out; ++ + if (!pskb_may_pull(skb, thlen)) + goto out; + +diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c +index e5971890d637d..9cb13a50011ef 100644 +--- a/net/ipv4/udp_offload.c ++++ b/net/ipv4/udp_offload.c +@@ -278,6 +278,10 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, + if (gso_skb->len <= sizeof(*uh) + mss) + return ERR_PTR(-EINVAL); + ++ if (unlikely(skb_checksum_start(gso_skb) != ++ skb_transport_header(gso_skb))) ++ return ERR_PTR(-EINVAL); ++ + if (skb_gso_ok(gso_skb, features | NETIF_F_GSO_ROBUST)) { + /* Packet is from an untrusted source, reset gso_segs. */ + skb_shinfo(gso_skb)->gso_segs = DIV_ROUND_UP(gso_skb->len - sizeof(*uh), +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c +index f97cb368e5a81..db8d0e1bf69ff 100644 +--- a/net/ipv6/ip6_output.c ++++ b/net/ipv6/ip6_output.c +@@ -1122,6 +1122,7 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk, + from = rt ? rcu_dereference(rt->from) : NULL; + err = ip6_route_get_saddr(net, from, &fl6->daddr, + sk ? 
inet6_sk(sk)->srcprefs : 0, ++ fl6->flowi6_l3mdev, + &fl6->saddr); + rcu_read_unlock(); + +diff --git a/net/ipv6/route.c b/net/ipv6/route.c +index eb3afaee62e8f..49ef5623c55e2 100644 +--- a/net/ipv6/route.c ++++ b/net/ipv6/route.c +@@ -5678,7 +5678,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, + goto nla_put_failure; + } else if (dest) { + struct in6_addr saddr_buf; +- if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 && ++ if (ip6_route_get_saddr(net, rt, dest, 0, 0, &saddr_buf) == 0 && + nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf)) + goto nla_put_failure; + } +diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c +index 8d21ff25f1602..70da78ab95202 100644 +--- a/net/l2tp/l2tp_core.c ++++ b/net/l2tp/l2tp_core.c +@@ -88,6 +88,11 @@ + /* Default trace flags */ + #define L2TP_DEFAULT_DEBUG_FLAGS 0 + ++#define L2TP_DEPTH_NESTING 2 ++#if L2TP_DEPTH_NESTING == SINGLE_DEPTH_NESTING ++#error "L2TP requires its own lockdep subclass" ++#endif ++ + /* Private data stored for received packets in the skb. + */ + struct l2tp_skb_cb { +@@ -1041,7 +1046,13 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, uns + IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED); + nf_reset_ct(skb); + +- bh_lock_sock_nested(sk); ++ /* L2TP uses its own lockdep subclass to avoid lockdep splats caused by ++ * nested socket calls on the same lockdep socket class. This can ++ * happen when data from a user socket is routed over l2tp, which uses ++ * another userspace socket. ++ */ ++ spin_lock_nested(&sk->sk_lock.slock, L2TP_DEPTH_NESTING); ++ + if (sock_owned_by_user(sk)) { + kfree_skb(skb); + ret = NET_XMIT_DROP; +@@ -1093,7 +1104,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, uns + ret = l2tp_xmit_queue(tunnel, skb, &inet->cork.fl); + + out_unlock: +- bh_unlock_sock(sk); ++ spin_unlock(&sk->sk_lock.slock); + + return ret; + } +diff --git a/net/mptcp/options.c b/net/mptcp/options.c +index 85aafa94cc8ab..604724cca887f 100644 +--- a/net/mptcp/options.c ++++ b/net/mptcp/options.c +@@ -958,7 +958,8 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk, + + if (subflow->remote_key_valid && + (((mp_opt->suboptions & OPTION_MPTCP_DSS) && mp_opt->use_ack) || +- ((mp_opt->suboptions & OPTION_MPTCP_ADD_ADDR) && !mp_opt->echo))) { ++ ((mp_opt->suboptions & OPTION_MPTCP_ADD_ADDR) && ++ (!mp_opt->echo || subflow->mp_join)))) { + /* subflows are fully established as soon as we get any + * additional ack, including ADD_ADDR. 
+ */ +diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c +index d8da5374d9e13..cf70a376398be 100644 +--- a/net/mptcp/pm.c ++++ b/net/mptcp/pm.c +@@ -427,6 +427,18 @@ int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc) + return mptcp_pm_nl_get_local_id(msk, &skc_local); + } + ++bool mptcp_pm_is_backup(struct mptcp_sock *msk, struct sock_common *skc) ++{ ++ struct mptcp_addr_info skc_local; ++ ++ mptcp_local_address((struct sock_common *)skc, &skc_local); ++ ++ if (mptcp_pm_is_userspace(msk)) ++ return mptcp_userspace_pm_is_backup(msk, &skc_local); ++ ++ return mptcp_pm_nl_is_backup(msk, &skc_local); ++} ++ + int mptcp_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id, + u8 *flags, int *ifindex) + { +diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c +index db621933b2035..2c49182c674f3 100644 +--- a/net/mptcp/pm_netlink.c ++++ b/net/mptcp/pm_netlink.c +@@ -353,7 +353,7 @@ bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk, + add_entry = mptcp_lookup_anno_list_by_saddr(msk, addr); + + if (add_entry) { +- if (mptcp_pm_is_kernel(msk)) ++ if (WARN_ON_ONCE(mptcp_pm_is_kernel(msk))) + return false; + + sk_reset_timer(sk, &add_entry->add_timer, +@@ -520,8 +520,8 @@ __lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info, + + static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk) + { ++ struct mptcp_pm_addr_entry *local, *signal_and_subflow = NULL; + struct sock *sk = (struct sock *)msk; +- struct mptcp_pm_addr_entry *local; + unsigned int add_addr_signal_max; + unsigned int local_addr_max; + struct pm_nl_pernet *pernet; +@@ -563,8 +563,6 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk) + + /* check first for announce */ + if (msk->pm.add_addr_signaled < add_addr_signal_max) { +- local = select_signal_address(pernet, msk); +- + /* due to racing events on both ends we can reach here while + * previous add address is still running: if we invoke now + * mptcp_pm_announce_addr(), that will fail and the +@@ -575,16 +573,26 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk) + if (msk->pm.addr_signal & BIT(MPTCP_ADD_ADDR_SIGNAL)) + return; + +- if (local) { +- if (mptcp_pm_alloc_anno_list(msk, &local->addr)) { +- __clear_bit(local->addr.id, msk->pm.id_avail_bitmap); +- msk->pm.add_addr_signaled++; +- mptcp_pm_announce_addr(msk, &local->addr, false); +- mptcp_pm_nl_addr_send_ack(msk); +- } +- } ++ local = select_signal_address(pernet, msk); ++ if (!local) ++ goto subflow; ++ ++ /* If the alloc fails, we are on memory pressure, not worth ++ * continuing, and trying to create subflows. 
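[Editor's sketch] For the dual-flag endpoint case this hunk introduces, the control flow is easier to see stripped of the path-manager bookkeeping: the address chosen for the announce is stashed in signal_and_subflow and consumed by the first pass of the subflow loop. Toy sketch only; the addresses and helper names are invented:

    #include <stdio.h>

    static void announce(const char *a)       { printf("announce %s\n", a); }
    static void create_subflow(const char *a) { printf("subflow  %s\n", a); }
    static const char *pick_local(void)       { return "10.0.2.2"; }

    int main(void)
    {
        const char *signal_and_subflow = NULL;
        const char *endpoint = "10.0.3.2";    /* endpoint flags: signal,subflow */
        int has_subflow_flag = 1;

        /* announce pass: stash the address if it also wants a subflow */
        announce(endpoint);
        if (has_subflow_flag)
            signal_and_subflow = endpoint;

        /* subflow pass: drain the stash before picking fresh addresses */
        const char *local = signal_and_subflow ? signal_and_subflow : pick_local();
        signal_and_subflow = NULL;
        create_subflow(local);
        return 0;
    }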
++ */ ++ if (!mptcp_pm_alloc_anno_list(msk, &local->addr)) ++ return; ++ ++ __clear_bit(local->addr.id, msk->pm.id_avail_bitmap); ++ msk->pm.add_addr_signaled++; ++ mptcp_pm_announce_addr(msk, &local->addr, false); ++ mptcp_pm_nl_addr_send_ack(msk); ++ ++ if (local->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) ++ signal_and_subflow = local; + } + ++subflow: + /* check if should create a new subflow */ + while (msk->pm.local_addr_used < local_addr_max && + msk->pm.subflows < subflows_max) { +@@ -592,9 +600,14 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk) + bool fullmesh; + int i, nr; + +- local = select_local_address(pernet, msk); +- if (!local) +- break; ++ if (signal_and_subflow) { ++ local = signal_and_subflow; ++ signal_and_subflow = NULL; ++ } else { ++ local = select_local_address(pernet, msk); ++ if (!local) ++ break; ++ } + + fullmesh = !!(local->flags & MPTCP_PM_ADDR_FLAG_FULLMESH); + +@@ -1109,6 +1122,24 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc + return ret; + } + ++bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc) ++{ ++ struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); ++ struct mptcp_pm_addr_entry *entry; ++ bool backup = false; ++ ++ rcu_read_lock(); ++ list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) { ++ if (mptcp_addresses_equal(&entry->addr, skc, entry->addr.port)) { ++ backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP); ++ break; ++ } ++ } ++ rcu_read_unlock(); ++ ++ return backup; ++} ++ + #define MPTCP_PM_CMD_GRP_OFFSET 0 + #define MPTCP_PM_EV_GRP_OFFSET 1 + +@@ -1341,8 +1372,8 @@ static int mptcp_nl_cmd_add_addr(struct sk_buff *skb, struct genl_info *info) + if (ret < 0) + return ret; + +- if (addr.addr.port && !(addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) { +- GENL_SET_ERR_MSG(info, "flags must have signal when using port"); ++ if (addr.addr.port && !address_use_port(&addr)) { ++ GENL_SET_ERR_MSG(info, "flags must have signal and not subflow when using port"); + return -EINVAL; + } + +diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c +index f36f87a62dd0d..6738bad048cec 100644 +--- a/net/mptcp/pm_userspace.c ++++ b/net/mptcp/pm_userspace.c +@@ -157,6 +157,24 @@ int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk, + return mptcp_userspace_pm_append_new_local_addr(msk, &new_entry, true); + } + ++bool mptcp_userspace_pm_is_backup(struct mptcp_sock *msk, ++ struct mptcp_addr_info *skc) ++{ ++ struct mptcp_pm_addr_entry *entry; ++ bool backup = false; ++ ++ spin_lock_bh(&msk->pm.lock); ++ list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) { ++ if (mptcp_addresses_equal(&entry->addr, skc, false)) { ++ backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP); ++ break; ++ } ++ } ++ spin_unlock_bh(&msk->pm.lock); ++ ++ return backup; ++} ++ + int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info) + { + struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN]; +diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h +index c28ac5dfd0b58..0201b1004a3b9 100644 +--- a/net/mptcp/protocol.h ++++ b/net/mptcp/protocol.h +@@ -1032,6 +1032,9 @@ bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining, + int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc); + int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc); + int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc); ++bool mptcp_pm_is_backup(struct 
mptcp_sock *msk, struct sock_common *skc); ++bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc); ++bool mptcp_userspace_pm_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc); + + static inline u8 subflow_get_local_id(const struct mptcp_subflow_context *subflow) + { +diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c +index bc1efc1787720..927c2d5997dc7 100644 +--- a/net/mptcp/subflow.c ++++ b/net/mptcp/subflow.c +@@ -100,6 +100,7 @@ static struct mptcp_sock *subflow_token_join_request(struct request_sock *req) + return NULL; + } + subflow_req->local_id = local_id; ++ subflow_req->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)req); + + return msk; + } +@@ -601,6 +602,8 @@ static int subflow_chk_local_id(struct sock *sk) + return err; + + subflow_set_local_id(subflow, err); ++ subflow->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)sk); ++ + return 0; + } + +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c +index dd044a47c8723..ea139fca74cb9 100644 +--- a/net/netfilter/nf_tables_api.c ++++ b/net/netfilter/nf_tables_api.c +@@ -3743,6 +3743,15 @@ static void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *r + nf_tables_rule_destroy(ctx, rule); + } + ++/** nft_chain_validate - loop detection and hook validation ++ * ++ * @ctx: context containing call depth and base chain ++ * @chain: chain to validate ++ * ++ * Walk through the rules of the given chain and chase all jumps/gotos ++ * and set lookups until either the jump limit is hit or all reachable ++ * chains have been validated. ++ */ + int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain) + { + struct nft_expr *expr, *last; +@@ -3764,6 +3773,9 @@ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain) + if (!expr->ops->validate) + continue; + ++ /* This may call nft_chain_validate() recursively, ++ * callers that do so must increment ctx->level. ++ */ + err = expr->ops->validate(ctx, expr, &data); + if (err < 0) + return err; +@@ -10621,146 +10633,6 @@ int nft_chain_validate_hooks(const struct nft_chain *chain, + } + EXPORT_SYMBOL_GPL(nft_chain_validate_hooks); + +-/* +- * Loop detection - walk through the ruleset beginning at the destination chain +- * of a new jump until either the source chain is reached (loop) or all +- * reachable chains have been traversed. +- * +- * The loop check is performed whenever a new jump verdict is added to an +- * expression or verdict map or a verdict map is bound to a new chain. 
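[Editor's sketch] The dedicated loop walker removed below duplicated what nft_chain_validate() already provides: walk the rule graph from a jump target and fail once a call-depth cap is hit, which also rejects cycles. A minimal sketch of that idea (one jump per chain for brevity; the cap and errno values here are illustrative, and real chains carry many rules plus set lookups):

    #include <stdio.h>

    #define JUMP_STACK_SIZE 16             /* illustrative depth cap */
    #define ELOOP 40                       /* illustrative errno */

    struct chain { struct chain *jump; };

    static int chain_validate(const struct chain *c, int level)
    {
        if (level > JUMP_STACK_SIZE)
            return -ELOOP;                 /* cycle, or ruleset too deep */
        if (c->jump)                       /* chase jumps/gotos recursively */
            return chain_validate(c->jump, level + 1);
        return 0;
    }

    int main(void)
    {
        struct chain a = { 0 }, b = { 0 };
        a.jump = &b;
        b.jump = &a;                       /* deliberate loop */
        printf("%d\n", chain_validate(&a, 0));   /* prints -40 */
        return 0;
    }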
+- */ +- +-static int nf_tables_check_loops(const struct nft_ctx *ctx, +- const struct nft_chain *chain); +- +-static int nft_check_loops(const struct nft_ctx *ctx, +- const struct nft_set_ext *ext) +-{ +- const struct nft_data *data; +- int ret; +- +- data = nft_set_ext_data(ext); +- switch (data->verdict.code) { +- case NFT_JUMP: +- case NFT_GOTO: +- ret = nf_tables_check_loops(ctx, data->verdict.chain); +- break; +- default: +- ret = 0; +- break; +- } +- +- return ret; +-} +- +-static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx, +- struct nft_set *set, +- const struct nft_set_iter *iter, +- struct nft_set_elem *elem) +-{ +- const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); +- +- if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) && +- *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END) +- return 0; +- +- return nft_check_loops(ctx, ext); +-} +- +-static int nft_set_catchall_loops(const struct nft_ctx *ctx, +- struct nft_set *set) +-{ +- u8 genmask = nft_genmask_next(ctx->net); +- struct nft_set_elem_catchall *catchall; +- struct nft_set_ext *ext; +- int ret = 0; +- +- list_for_each_entry_rcu(catchall, &set->catchall_list, list) { +- ext = nft_set_elem_ext(set, catchall->elem); +- if (!nft_set_elem_active(ext, genmask)) +- continue; +- +- ret = nft_check_loops(ctx, ext); +- if (ret < 0) +- return ret; +- } +- +- return ret; +-} +- +-static int nf_tables_check_loops(const struct nft_ctx *ctx, +- const struct nft_chain *chain) +-{ +- const struct nft_rule *rule; +- const struct nft_expr *expr, *last; +- struct nft_set *set; +- struct nft_set_binding *binding; +- struct nft_set_iter iter; +- +- if (ctx->chain == chain) +- return -ELOOP; +- +- if (fatal_signal_pending(current)) +- return -EINTR; +- +- list_for_each_entry(rule, &chain->rules, list) { +- nft_rule_for_each_expr(expr, last, rule) { +- struct nft_immediate_expr *priv; +- const struct nft_data *data; +- int err; +- +- if (strcmp(expr->ops->type->name, "immediate")) +- continue; +- +- priv = nft_expr_priv(expr); +- if (priv->dreg != NFT_REG_VERDICT) +- continue; +- +- data = &priv->data; +- switch (data->verdict.code) { +- case NFT_JUMP: +- case NFT_GOTO: +- err = nf_tables_check_loops(ctx, +- data->verdict.chain); +- if (err < 0) +- return err; +- break; +- default: +- break; +- } +- } +- } +- +- list_for_each_entry(set, &ctx->table->sets, list) { +- if (!nft_is_active_next(ctx->net, set)) +- continue; +- if (!(set->flags & NFT_SET_MAP) || +- set->dtype != NFT_DATA_VERDICT) +- continue; +- +- list_for_each_entry(binding, &set->bindings, list) { +- if (!(binding->flags & NFT_SET_MAP) || +- binding->chain != chain) +- continue; +- +- iter.genmask = nft_genmask_next(ctx->net); +- iter.skip = 0; +- iter.count = 0; +- iter.err = 0; +- iter.fn = nf_tables_loop_check_setelem; +- +- set->ops->walk(ctx, set, &iter); +- if (!iter.err) +- iter.err = nft_set_catchall_loops(ctx, set); +- +- if (iter.err < 0) +- return iter.err; +- } +- } +- +- return 0; +-} +- + /** + * nft_parse_u32_check - fetch u32 attribute and check for maximum value + * +@@ -10873,7 +10745,7 @@ static int nft_validate_register_store(const struct nft_ctx *ctx, + if (data != NULL && + (data->verdict.code == NFT_GOTO || + data->verdict.code == NFT_JUMP)) { +- err = nf_tables_check_loops(ctx, data->verdict.chain); ++ err = nft_chain_validate(ctx, data->verdict.chain); + if (err < 0) + return err; + } +diff --git a/net/sctp/input.c b/net/sctp/input.c +index 17fcaa9b0df94..a8a254a5008e5 100644 +--- a/net/sctp/input.c ++++ b/net/sctp/input.c +@@ 
-735,15 +735,19 @@ static int __sctp_hash_endpoint(struct sctp_endpoint *ep) + struct sock *sk = ep->base.sk; + struct net *net = sock_net(sk); + struct sctp_hashbucket *head; ++ int err = 0; + + ep->hashent = sctp_ep_hashfn(net, ep->base.bind_addr.port); + head = &sctp_ep_hashtable[ep->hashent]; + ++ write_lock(&head->lock); + if (sk->sk_reuseport) { + bool any = sctp_is_ep_boundall(sk); + struct sctp_endpoint *ep2; + struct list_head *list; +- int cnt = 0, err = 1; ++ int cnt = 0; ++ ++ err = 1; + + list_for_each(list, &ep->base.bind_addr.address_list) + cnt++; +@@ -761,24 +765,24 @@ static int __sctp_hash_endpoint(struct sctp_endpoint *ep) + if (!err) { + err = reuseport_add_sock(sk, sk2, any); + if (err) +- return err; ++ goto out; + break; + } else if (err < 0) { +- return err; ++ goto out; + } + } + + if (err) { + err = reuseport_alloc(sk, any); + if (err) +- return err; ++ goto out; + } + } + +- write_lock(&head->lock); + hlist_add_head(&ep->node, &head->chain); ++out: + write_unlock(&head->lock); +- return 0; ++ return err; + } + + /* Add an endpoint to the hash. Local BH-safe. */ +@@ -803,10 +807,9 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep) + + head = &sctp_ep_hashtable[ep->hashent]; + ++ write_lock(&head->lock); + if (rcu_access_pointer(sk->sk_reuseport_cb)) + reuseport_detach_sock(sk); +- +- write_lock(&head->lock); + hlist_del_init(&ep->node); + write_unlock(&head->lock); + } +diff --git a/net/smc/smc_stats.h b/net/smc/smc_stats.h +index 9d32058db2b5d..e19177ce40923 100644 +--- a/net/smc/smc_stats.h ++++ b/net/smc/smc_stats.h +@@ -19,7 +19,7 @@ + + #include "smc_clc.h" + +-#define SMC_MAX_FBACK_RSN_CNT 30 ++#define SMC_MAX_FBACK_RSN_CNT 36 + + enum { + SMC_BUF_8K, +diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c +index 6debf4fd42d4e..cef623ea15060 100644 +--- a/net/sunrpc/sched.c ++++ b/net/sunrpc/sched.c +@@ -369,8 +369,10 @@ static void rpc_make_runnable(struct workqueue_struct *wq, + if (RPC_IS_ASYNC(task)) { + INIT_WORK(&task->u.tk_work, rpc_async_schedule); + queue_work(wq, &task->u.tk_work); +- } else ++ } else { ++ smp_mb__after_atomic(); + wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED); ++ } + } + + /* +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c +index a551be47cb6c6..b7f62442d8268 100644 +--- a/net/unix/af_unix.c ++++ b/net/unix/af_unix.c +@@ -1483,6 +1483,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, + struct unix_sock *u = unix_sk(sk), *newu, *otheru; + struct net *net = sock_net(sk); + struct sk_buff *skb = NULL; ++ unsigned char state; + long timeo; + int err; + +@@ -1529,7 +1530,6 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, + goto out; + } + +- /* Latch state of peer */ + unix_state_lock(other); + + /* Apparently VFS overslept socket death. Retry. */ +@@ -1559,37 +1559,21 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, + goto restart; + } + +- /* Latch our state. +- +- It is tricky place. We need to grab our state lock and cannot +- drop lock on peer. It is dangerous because deadlock is +- possible. Connect to self case and simultaneous +- attempt to connect are eliminated by checking socket +- state. other is TCP_LISTEN, if sk is TCP_LISTEN we +- check this before attempt to grab lock. +- +- Well, and we have to recheck the state after socket locked. ++ /* self connect and simultaneous connect are eliminated ++ * by rejecting TCP_LISTEN socket to avoid deadlock. 
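[Editor's sketch] The __sctp_hash_endpoint() rework earlier in this chunk widens head->lock so the reuseport update and the hash insert can no longer be observed half-done, with every failure funnelling through a single unlock label. The shape, in a self-contained sketch (the rwlock and the two stubs are invented):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t head_lock = PTHREAD_RWLOCK_INITIALIZER;

    static int  add_to_reuse_group(void) { return 0; }  /* may fail: return <0 */
    static void hash_insert(void)        { }

    static int hash_endpoint(int reuse)
    {
        int err = 0;

        pthread_rwlock_wrlock(&head_lock); /* taken before the reuseport work */
        if (reuse) {
            err = add_to_reuse_group();
            if (err)
                goto out;                  /* single unlock site, as in the patch */
        }
        hash_insert();                     /* hash and group now change atomically */
    out:
        pthread_rwlock_unlock(&head_lock);
        return err;
    }

    int main(void)
    {
        printf("%d\n", hash_endpoint(1));  /* prints 0 */
        return 0;
    }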
+ */ +- switch (READ_ONCE(sk->sk_state)) { +- case TCP_CLOSE: +- /* This is ok... continue with connect */ +- break; +- case TCP_ESTABLISHED: +- /* Socket is already connected */ +- err = -EISCONN; +- goto out_unlock; +- default: +- err = -EINVAL; ++ state = READ_ONCE(sk->sk_state); ++ if (unlikely(state != TCP_CLOSE)) { ++ err = state == TCP_ESTABLISHED ? -EISCONN : -EINVAL; + goto out_unlock; + } + + unix_state_lock_nested(sk, U_LOCK_SECOND); + +- if (sk->sk_state != TCP_CLOSE) { ++ if (unlikely(sk->sk_state != TCP_CLOSE)) { ++ err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EINVAL; + unix_state_unlock(sk); +- unix_state_unlock(other); +- sock_put(other); +- goto restart; ++ goto out_unlock; + } + + err = security_unix_stream_connect(sk, other, newsk); +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c +index 8f8f077e6cd40..be5c42d6ffbea 100644 +--- a/net/wireless/nl80211.c ++++ b/net/wireless/nl80211.c +@@ -3398,6 +3398,33 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev, + if (chandef.chan != cur_chan) + return -EBUSY; + ++ /* only allow this for regular channel widths */ ++ switch (wdev->links[link_id].ap.chandef.width) { ++ case NL80211_CHAN_WIDTH_20_NOHT: ++ case NL80211_CHAN_WIDTH_20: ++ case NL80211_CHAN_WIDTH_40: ++ case NL80211_CHAN_WIDTH_80: ++ case NL80211_CHAN_WIDTH_80P80: ++ case NL80211_CHAN_WIDTH_160: ++ case NL80211_CHAN_WIDTH_320: ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ switch (chandef.width) { ++ case NL80211_CHAN_WIDTH_20_NOHT: ++ case NL80211_CHAN_WIDTH_20: ++ case NL80211_CHAN_WIDTH_40: ++ case NL80211_CHAN_WIDTH_80: ++ case NL80211_CHAN_WIDTH_80P80: ++ case NL80211_CHAN_WIDTH_160: ++ case NL80211_CHAN_WIDTH_320: ++ break; ++ default: ++ return -EINVAL; ++ } ++ + result = rdev_set_ap_chanwidth(rdev, dev, link_id, + &chandef); + if (result) +@@ -4446,10 +4473,7 @@ static void get_key_callback(void *c, struct key_params *params) + struct nlattr *key; + struct get_key_cookie *cookie = c; + +- if ((params->key && +- nla_put(cookie->msg, NL80211_ATTR_KEY_DATA, +- params->key_len, params->key)) || +- (params->seq && ++ if ((params->seq && + nla_put(cookie->msg, NL80211_ATTR_KEY_SEQ, + params->seq_len, params->seq)) || + (params->cipher && +@@ -4461,10 +4485,7 @@ static void get_key_callback(void *c, struct key_params *params) + if (!key) + goto nla_put_failure; + +- if ((params->key && +- nla_put(cookie->msg, NL80211_KEY_DATA, +- params->key_len, params->key)) || +- (params->seq && ++ if ((params->seq && + nla_put(cookie->msg, NL80211_KEY_SEQ, + params->seq_len, params->seq)) || + (params->cipher && +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c +index 038db8902c9ed..82c0d3a3327ab 100644 +--- a/sound/pci/hda/patch_hdmi.c ++++ b/sound/pci/hda/patch_hdmi.c +@@ -1989,6 +1989,8 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid) + } + + static const struct snd_pci_quirk force_connect_list[] = { ++ SND_PCI_QUIRK(0x103c, 0x83e2, "HP EliteDesk 800 G4", 1), ++ SND_PCI_QUIRK(0x103c, 0x83ef, "HP MP9 G4 Retail System AMS", 1), + SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1), + SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1), + SND_PCI_QUIRK(0x103c, 0x8711, "HP", 1), +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 0b33a00771450..82dcea2b78000 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -10360,6 +10360,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x8086, 0x3038, "Intel NUC 13", 
ALC295_FIXUP_CHROME_BOOK), + SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0xf111, 0x0006, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), ++ SND_PCI_QUIRK(0xf111, 0x0009, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), + + #if 0 + /* Below is a quirk table taken from the old code. +diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c +index 36dddf230c2c4..d597e59863ee3 100644 +--- a/sound/soc/amd/yc/acp6x-mach.c ++++ b/sound/soc/amd/yc/acp6x-mach.c +@@ -409,6 +409,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { + DMI_MATCH(DMI_BOARD_NAME, "8A43"), + } + }, ++ { ++ .driver_data = &acp6x_card, ++ .matches = { ++ DMI_MATCH(DMI_BOARD_VENDOR, "HP"), ++ DMI_MATCH(DMI_BOARD_NAME, "8A44"), ++ } ++ }, + { + .driver_data = &acp6x_card, + .matches = { +diff --git a/sound/soc/codecs/wcd938x-sdw.c b/sound/soc/codecs/wcd938x-sdw.c +index a1f04010da95f..132c1d24f8f6e 100644 +--- a/sound/soc/codecs/wcd938x-sdw.c ++++ b/sound/soc/codecs/wcd938x-sdw.c +@@ -1252,12 +1252,12 @@ static int wcd9380_probe(struct sdw_slave *pdev, + pdev->prop.lane_control_support = true; + pdev->prop.simple_clk_stop_capable = true; + if (wcd->is_tx) { +- pdev->prop.source_ports = GENMASK(WCD938X_MAX_SWR_PORTS, 0); ++ pdev->prop.source_ports = GENMASK(WCD938X_MAX_SWR_PORTS - 1, 0); + pdev->prop.src_dpn_prop = wcd938x_dpn_prop; + wcd->ch_info = &wcd938x_sdw_tx_ch_info[0]; + pdev->prop.wake_capable = true; + } else { +- pdev->prop.sink_ports = GENMASK(WCD938X_MAX_SWR_PORTS, 0); ++ pdev->prop.sink_ports = GENMASK(WCD938X_MAX_SWR_PORTS - 1, 0); + pdev->prop.sink_dpn_prop = wcd938x_dpn_prop; + wcd->ch_info = &wcd938x_sdw_rx_ch_info[0]; + } +diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c +index 1253695bebd86..53b828f681020 100644 +--- a/sound/soc/codecs/wsa881x.c ++++ b/sound/soc/codecs/wsa881x.c +@@ -1152,7 +1152,7 @@ static int wsa881x_probe(struct sdw_slave *pdev, + wsa881x->sconfig.frame_rate = 48000; + wsa881x->sconfig.direction = SDW_DATA_DIR_RX; + wsa881x->sconfig.type = SDW_STREAM_PDM; +- pdev->prop.sink_ports = GENMASK(WSA881X_MAX_SWR_PORTS, 0); ++ pdev->prop.sink_ports = GENMASK(WSA881X_MAX_SWR_PORTS - 1, 0); + pdev->prop.sink_dpn_prop = wsa_sink_dpn_prop; + pdev->prop.scp_int1_mask = SDW_SCP_INT1_BUS_CLASH | SDW_SCP_INT1_PARITY; + pdev->prop.clk_stop_mode1 = true; +diff --git a/sound/soc/codecs/wsa883x.c b/sound/soc/codecs/wsa883x.c +index a2e86ef7d18f5..2169d93989841 100644 +--- a/sound/soc/codecs/wsa883x.c ++++ b/sound/soc/codecs/wsa883x.c +@@ -1399,7 +1399,15 @@ static int wsa883x_probe(struct sdw_slave *pdev, + wsa883x->sconfig.direction = SDW_DATA_DIR_RX; + wsa883x->sconfig.type = SDW_STREAM_PDM; + +- pdev->prop.sink_ports = GENMASK(WSA883X_MAX_SWR_PORTS, 0); ++ /** ++ * Port map index starts with 0, however the data port for this codec ++ * are from index 1 ++ */ ++ if (of_property_read_u32_array(dev->of_node, "qcom,port-mapping", &pdev->m_port_map[1], ++ WSA883X_MAX_SWR_PORTS)) ++ dev_dbg(dev, "Static Port mapping not specified\n"); ++ ++ pdev->prop.sink_ports = GENMASK(WSA883X_MAX_SWR_PORTS - 1, 0); + pdev->prop.simple_clk_stop_capable = true; + pdev->prop.sink_dpn_prop = wsa_sink_dpn_prop; + pdev->prop.scp_int1_mask = SDW_SCP_INT1_BUS_CLASH | SDW_SCP_INT1_PARITY; +diff --git a/sound/soc/codecs/wsa884x.c b/sound/soc/codecs/wsa884x.c +index 993d76b18b536..1cd52fab7b40d 100644 +--- a/sound/soc/codecs/wsa884x.c ++++ b/sound/soc/codecs/wsa884x.c +@@ 
-1858,7 +1858,15 @@ static int wsa884x_probe(struct sdw_slave *pdev, + wsa884x->sconfig.direction = SDW_DATA_DIR_RX; + wsa884x->sconfig.type = SDW_STREAM_PDM; + +- pdev->prop.sink_ports = GENMASK(WSA884X_MAX_SWR_PORTS, 0); ++ /** ++ * Port map index starts with 0, however the data port for this codec ++ * are from index 1 ++ */ ++ if (of_property_read_u32_array(dev->of_node, "qcom,port-mapping", &pdev->m_port_map[1], ++ WSA884X_MAX_SWR_PORTS)) ++ dev_dbg(dev, "Static Port mapping not specified\n"); ++ ++ pdev->prop.sink_ports = GENMASK(WSA884X_MAX_SWR_PORTS - 1, 0); + pdev->prop.simple_clk_stop_capable = true; + pdev->prop.sink_dpn_prop = wsa884x_sink_dpn_prop; + pdev->prop.scp_int1_mask = SDW_SCP_INT1_BUS_CLASH | SDW_SCP_INT1_PARITY; +diff --git a/sound/soc/meson/axg-fifo.c b/sound/soc/meson/axg-fifo.c +index 94b169a5493b5..5218e40aeb1bb 100644 +--- a/sound/soc/meson/axg-fifo.c ++++ b/sound/soc/meson/axg-fifo.c +@@ -207,25 +207,18 @@ static irqreturn_t axg_fifo_pcm_irq_block(int irq, void *dev_id) + status = FIELD_GET(STATUS1_INT_STS, status); + axg_fifo_ack_irq(fifo, status); + +- /* Use the thread to call period elapsed on nonatomic links */ +- if (status & FIFO_INT_COUNT_REPEAT) +- return IRQ_WAKE_THREAD; ++ if (status & ~FIFO_INT_COUNT_REPEAT) ++ dev_dbg(axg_fifo_dev(ss), "unexpected irq - STS 0x%02x\n", ++ status); + +- dev_dbg(axg_fifo_dev(ss), "unexpected irq - STS 0x%02x\n", +- status); ++ if (status & FIFO_INT_COUNT_REPEAT) { ++ snd_pcm_period_elapsed(ss); ++ return IRQ_HANDLED; ++ } + + return IRQ_NONE; + } + +-static irqreturn_t axg_fifo_pcm_irq_block_thread(int irq, void *dev_id) +-{ +- struct snd_pcm_substream *ss = dev_id; +- +- snd_pcm_period_elapsed(ss); +- +- return IRQ_HANDLED; +-} +- + int axg_fifo_pcm_open(struct snd_soc_component *component, + struct snd_pcm_substream *ss) + { +@@ -251,8 +244,9 @@ int axg_fifo_pcm_open(struct snd_soc_component *component, + if (ret) + return ret; + +- ret = request_threaded_irq(fifo->irq, axg_fifo_pcm_irq_block, +- axg_fifo_pcm_irq_block_thread, ++ /* Use the threaded irq handler only with non-atomic links */ ++ ret = request_threaded_irq(fifo->irq, NULL, ++ axg_fifo_pcm_irq_block, + IRQF_ONESHOT, dev_name(dev), ss); + if (ret) + return ret; +diff --git a/sound/soc/sof/mediatek/mt8195/mt8195.c b/sound/soc/sof/mediatek/mt8195/mt8195.c +index 7d6a568556ea4..b5b4ea854da4b 100644 +--- a/sound/soc/sof/mediatek/mt8195/mt8195.c ++++ b/sound/soc/sof/mediatek/mt8195/mt8195.c +@@ -624,7 +624,7 @@ static struct snd_sof_dsp_ops sof_mt8195_ops = { + static struct snd_sof_of_mach sof_mt8195_machs[] = { + { + .compatible = "google,tomato", +- .sof_tplg_filename = "sof-mt8195-mt6359-rt1019-rt5682-dts.tplg" ++ .sof_tplg_filename = "sof-mt8195-mt6359-rt1019-rt5682.tplg" + }, { + .compatible = "mediatek,mt8195", + .sof_tplg_filename = "sof-mt8195.tplg" +diff --git a/sound/soc/sti/sti_uniperif.c b/sound/soc/sti/sti_uniperif.c +index 2c21a86421e66..cc9a8122b9bc2 100644 +--- a/sound/soc/sti/sti_uniperif.c ++++ b/sound/soc/sti/sti_uniperif.c +@@ -352,7 +352,7 @@ static int sti_uniperiph_resume(struct snd_soc_component *component) + return ret; + } + +-static int sti_uniperiph_dai_probe(struct snd_soc_dai *dai) ++int sti_uniperiph_dai_probe(struct snd_soc_dai *dai) + { + struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai); + struct sti_uniperiph_dai *dai_data = &priv->dai_data; +diff --git a/sound/soc/sti/uniperif.h b/sound/soc/sti/uniperif.h +index 2a5de328501c1..74e51f0ff85c8 100644 +--- a/sound/soc/sti/uniperif.h ++++ b/sound/soc/sti/uniperif.h 
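[Editor's sketch] The recurring GENMASK(..., 0) edits in the codec hunks above are all the same off-by-one: GENMASK(h, l) sets bits l..h inclusive, so a mask describing N ports must end at bit N - 1. A quick demonstration with a local macro matching the kernel contract (64-bit unsigned long long assumed):

    #include <stdio.h>

    /* Same contract as the kernel macro: set bits l..h inclusive. */
    #define GENMASK(h, l) (((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))

    int main(void)
    {
        enum { MAX_SWR_PORTS = 8 };

        printf("0x%llx\n", GENMASK(MAX_SWR_PORTS, 0));      /* 0x1ff: 9 bits, one too many */
        printf("0x%llx\n", GENMASK(MAX_SWR_PORTS - 1, 0));  /* 0xff: exactly 8 ports */
        return 0;
    }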
+@@ -1380,6 +1380,7 @@ int uni_reader_init(struct platform_device *pdev, + struct uniperif *reader); + + /* common */ ++int sti_uniperiph_dai_probe(struct snd_soc_dai *dai); + int sti_uniperiph_dai_set_fmt(struct snd_soc_dai *dai, + unsigned int fmt); + +diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c +index dd9013c476649..6d1ce030963c6 100644 +--- a/sound/soc/sti/uniperif_player.c ++++ b/sound/soc/sti/uniperif_player.c +@@ -1038,6 +1038,7 @@ static const struct snd_soc_dai_ops uni_player_dai_ops = { + .startup = uni_player_startup, + .shutdown = uni_player_shutdown, + .prepare = uni_player_prepare, ++ .probe = sti_uniperiph_dai_probe, + .trigger = uni_player_trigger, + .hw_params = sti_uniperiph_dai_hw_params, + .set_fmt = sti_uniperiph_dai_set_fmt, +diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c +index 065c5f0d1f5f0..05ea2b794eb92 100644 +--- a/sound/soc/sti/uniperif_reader.c ++++ b/sound/soc/sti/uniperif_reader.c +@@ -401,6 +401,7 @@ static const struct snd_soc_dai_ops uni_reader_dai_ops = { + .startup = uni_reader_startup, + .shutdown = uni_reader_shutdown, + .prepare = uni_reader_prepare, ++ .probe = sti_uniperiph_dai_probe, + .trigger = uni_reader_trigger, + .hw_params = sti_uniperiph_dai_hw_params, + .set_fmt = sti_uniperiph_dai_set_fmt, +diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c +index f4437015d43a7..9df49a880b750 100644 +--- a/sound/usb/line6/driver.c ++++ b/sound/usb/line6/driver.c +@@ -286,12 +286,14 @@ static void line6_data_received(struct urb *urb) + { + struct usb_line6 *line6 = (struct usb_line6 *)urb->context; + struct midi_buffer *mb = &line6->line6midi->midibuf_in; ++ unsigned long flags; + int done; + + if (urb->status == -ESHUTDOWN) + return; + + if (line6->properties->capabilities & LINE6_CAP_CONTROL_MIDI) { ++ spin_lock_irqsave(&line6->line6midi->lock, flags); + done = + line6_midibuf_write(mb, urb->transfer_buffer, urb->actual_length); + +@@ -300,12 +302,15 @@ static void line6_data_received(struct urb *urb) + dev_dbg(line6->ifcdev, "%d %d buffer overflow - message skipped\n", + done, urb->actual_length); + } ++ spin_unlock_irqrestore(&line6->line6midi->lock, flags); + + for (;;) { ++ spin_lock_irqsave(&line6->line6midi->lock, flags); + done = + line6_midibuf_read(mb, line6->buffer_message, + LINE6_MIDI_MESSAGE_MAXLEN, + LINE6_MIDIBUF_READ_RX); ++ spin_unlock_irqrestore(&line6->line6midi->lock, flags); + + if (done <= 0) + break; +diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h +index 5d72dc8441cbb..af1b8cf5a9883 100644 +--- a/sound/usb/quirks-table.h ++++ b/sound/usb/quirks-table.h +@@ -2594,6 +2594,10 @@ YAMAHA_DEVICE(0x7010, "UB99"), + } + }, + ++/* Stanton ScratchAmp */ ++{ USB_DEVICE(0x103d, 0x0100) }, ++{ USB_DEVICE(0x103d, 0x0101) }, ++ + /* Novation EMS devices */ + { + USB_DEVICE_VENDOR_SPEC(0x1235, 0x0001), +diff --git a/tools/arch/arm64/include/asm/cputype.h b/tools/arch/arm64/include/asm/cputype.h +index 5f6f84837a490..329d41f8c9237 100644 +--- a/tools/arch/arm64/include/asm/cputype.h ++++ b/tools/arch/arm64/include/asm/cputype.h +@@ -84,6 +84,9 @@ + #define ARM_CPU_PART_CORTEX_X2 0xD48 + #define ARM_CPU_PART_NEOVERSE_N2 0xD49 + #define ARM_CPU_PART_CORTEX_A78C 0xD4B ++#define ARM_CPU_PART_NEOVERSE_V2 0xD4F ++#define ARM_CPU_PART_CORTEX_X4 0xD82 ++#define ARM_CPU_PART_NEOVERSE_V3 0xD84 + + #define APM_CPU_PART_POTENZA 0x000 + +@@ -153,6 +156,9 @@ + #define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2) + #define MIDR_NEOVERSE_N2 
MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2) + #define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C) ++#define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2) ++#define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4) ++#define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3) + #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) + #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) + #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) +diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c +index b15b343ebb6b1..9adcda7f1fedc 100644 +--- a/tools/testing/selftests/bpf/prog_tests/send_signal.c ++++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c +@@ -156,7 +156,8 @@ static void test_send_signal_tracepoint(bool signal_thread) + static void test_send_signal_perf(bool signal_thread) + { + struct perf_event_attr attr = { +- .sample_period = 1, ++ .freq = 1, ++ .sample_freq = 1000, + .type = PERF_TYPE_SOFTWARE, + .config = PERF_COUNT_SW_CPU_CLOCK, + }; +diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile +index 292359a542429..8b2b9bb8bad10 100644 +--- a/tools/testing/selftests/mm/Makefile ++++ b/tools/testing/selftests/mm/Makefile +@@ -101,7 +101,7 @@ endif + + endif + +-ifneq (,$(filter $(ARCH),arm64 ia64 mips64 parisc64 powerpc riscv64 s390x sparc64 x86_64)) ++ifneq (,$(filter $(ARCH),arm64 ia64 mips64 parisc64 powerpc riscv64 s390x sparc64 x86_64 s390)) + TEST_GEN_FILES += va_high_addr_switch + TEST_GEN_FILES += virtual_address_range + TEST_GEN_FILES += write_to_hugetlbfs +diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh +index a2dae2a3a93e0..b16b8278c4cea 100755 +--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh ++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh +@@ -812,7 +812,7 @@ pm_nl_check_endpoint() + done + + if [ -z "$id" ]; then +- test_fail "bad test - missing endpoint id" ++ fail_test "bad test - missing endpoint id" + return + fi + +@@ -1559,18 +1559,28 @@ chk_add_nr() + local add_nr=$1 + local echo_nr=$2 + local port_nr=${3:-0} +- local syn_nr=${4:-$port_nr} +- local syn_ack_nr=${5:-$port_nr} +- local ack_nr=${6:-$port_nr} +- local mis_syn_nr=${7:-0} +- local mis_ack_nr=${8:-0} ++ local ns_invert=${4:-""} ++ local syn_nr=$port_nr ++ local syn_ack_nr=$port_nr ++ local ack_nr=$port_nr ++ local mis_syn_nr=0 ++ local mis_ack_nr=0 ++ local ns_tx=$ns1 ++ local ns_rx=$ns2 ++ local extra_msg="" + local count + local timeout + +- timeout=$(ip netns exec $ns1 sysctl -n net.mptcp.add_addr_timeout) ++ if [[ $ns_invert = "invert" ]]; then ++ ns_tx=$ns2 ++ ns_rx=$ns1 ++ extra_msg="invert" ++ fi ++ ++ timeout=$(ip netns exec ${ns_tx} sysctl -n net.mptcp.add_addr_timeout) + + print_check "add" +- count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtAddAddr") ++ count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtAddAddr") + if [ -z "$count" ]; then + print_skip + # if the test configured a short timeout tolerate greater then expected +@@ -1582,7 +1592,7 @@ chk_add_nr() + fi + + print_check "echo" +- count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtEchoAdd") ++ count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtEchoAdd") + if [ -z "$count" ]; then + print_skip + elif [ "$count" != "$echo_nr" ]; then +@@ -1593,7 
+1603,7 @@ chk_add_nr() + + if [ $port_nr -gt 0 ]; then + print_check "pt" +- count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtPortAdd") ++ count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtPortAdd") + if [ -z "$count" ]; then + print_skip + elif [ "$count" != "$port_nr" ]; then +@@ -1603,7 +1613,7 @@ chk_add_nr() + fi + + print_check "syn" +- count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinPortSynRx") ++ count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMPJoinPortSynRx") + if [ -z "$count" ]; then + print_skip + elif [ "$count" != "$syn_nr" ]; then +@@ -1614,7 +1624,7 @@ chk_add_nr() + fi + + print_check "synack" +- count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtMPJoinPortSynAckRx") ++ count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtMPJoinPortSynAckRx") + if [ -z "$count" ]; then + print_skip + elif [ "$count" != "$syn_ack_nr" ]; then +@@ -1625,7 +1635,7 @@ chk_add_nr() + fi + + print_check "ack" +- count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinPortAckRx") ++ count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMPJoinPortAckRx") + if [ -z "$count" ]; then + print_skip + elif [ "$count" != "$ack_nr" ]; then +@@ -1636,7 +1646,7 @@ chk_add_nr() + fi + + print_check "syn" +- count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMismatchPortSynRx") ++ count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMismatchPortSynRx") + if [ -z "$count" ]; then + print_skip + elif [ "$count" != "$mis_syn_nr" ]; then +@@ -1647,7 +1657,7 @@ chk_add_nr() + fi + + print_check "ack" +- count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMismatchPortAckRx") ++ count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMismatchPortAckRx") + if [ -z "$count" ]; then + print_skip + elif [ "$count" != "$mis_ack_nr" ]; then +@@ -1657,6 +1667,8 @@ chk_add_nr() + print_ok + fi + fi ++ ++ print_info "$extra_msg" + } + + chk_add_tx_nr() +@@ -2121,6 +2133,21 @@ signal_address_tests() + chk_add_nr 1 1 + fi + ++ # uncommon: subflow and signal flags on the same endpoint ++ # or because the user wrongly picked both, but still expects the client ++ # to create additional subflows ++ if reset "subflow and signal together"; then ++ pm_nl_set_limits $ns1 0 2 ++ pm_nl_set_limits $ns2 0 2 ++ pm_nl_add_endpoint $ns2 10.0.3.2 flags signal,subflow ++ run_tests $ns1 $ns2 10.0.1.1 ++ chk_join_nr 1 1 1 ++ chk_add_nr 1 1 0 invert # only initiated by ns2 ++ chk_add_nr 0 0 0 # none initiated by ns1 ++ chk_rst_nr 0 0 invert # no RST sent by the client ++ chk_rst_nr 0 0 # no RST sent by the server ++ fi ++ + # accept and use add_addr with additional subflows + if reset "multiple subflows and signal"; then + pm_nl_set_limits $ns1 0 3 +diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh +index be97a7ed09503..f24bd2bf08311 100755 +--- a/tools/testing/selftests/net/mptcp/simult_flows.sh ++++ b/tools/testing/selftests/net/mptcp/simult_flows.sh +@@ -262,7 +262,7 @@ run_test() + do_transfer $small $large $time + lret=$? + mptcp_lib_result_code "${lret}" "${msg}" +- if [ $lret -ne 0 ] && ! mptcp_lib_subtest_is_flaky; then ++ if [ $lret -ne 0 ]; then + ret=$lret + [ $bail -eq 0 ] || exit $ret + fi +@@ -272,7 +272,7 @@ run_test() + do_transfer $large $small $time + lret=$? + mptcp_lib_result_code "${lret}" "${msg}" +- if [ $lret -ne 0 ] && ! 
mptcp_lib_subtest_is_flaky; then ++ if [ $lret -ne 0 ]; then + ret=$lret + [ $bail -eq 0 ] || exit $ret + fi +@@ -305,7 +305,7 @@ run_test 10 10 0 0 "balanced bwidth" + run_test 10 10 1 25 "balanced bwidth with unbalanced delay" + + # we still need some additional infrastructure to pass the following test-cases +-MPTCP_LIB_SUBTEST_FLAKY=1 run_test 10 3 0 0 "unbalanced bwidth" ++run_test 10 3 0 0 "unbalanced bwidth" + run_test 10 3 1 25 "unbalanced bwidth with unbalanced delay" + run_test 10 3 25 1 "unbalanced bwidth with opposed, unbalanced delay" + +diff --git a/tools/testing/selftests/rcutorture/bin/torture.sh b/tools/testing/selftests/rcutorture/bin/torture.sh +index 12b50a4a881ac..89a82f6f140ef 100755 +--- a/tools/testing/selftests/rcutorture/bin/torture.sh ++++ b/tools/testing/selftests/rcutorture/bin/torture.sh +@@ -567,7 +567,7 @@ then + torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000 tsc=watchdog" + torture_set "clocksourcewd-1" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration 45s --configs TREE03 --kconfig "CONFIG_TEST_CLOCKSOURCE_WATCHDOG=y" --trust-make + +- torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000 clocksource.max_cswd_read_retries=1 tsc=watchdog" ++ torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000 tsc=watchdog" + torture_set "clocksourcewd-2" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration 45s --configs TREE03 --kconfig "CONFIG_TEST_CLOCKSOURCE_WATCHDOG=y" --trust-make + + # In case our work is already done... diff --git a/patch/kernel/archive/odroidxu4-6.6/patch-6.6.46-47.patch b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.46-47.patch new file mode 100644 index 000000000000..830bd4b9a097 --- /dev/null +++ b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.46-47.patch @@ -0,0 +1,4211 @@ +diff --git a/Documentation/bpf/map_lpm_trie.rst b/Documentation/bpf/map_lpm_trie.rst +index 74d64a30f50073..f9cd579496c9ce 100644 +--- a/Documentation/bpf/map_lpm_trie.rst ++++ b/Documentation/bpf/map_lpm_trie.rst +@@ -17,7 +17,7 @@ significant byte. + + LPM tries may be created with a maximum prefix length that is a multiple + of 8, in the range from 8 to 2048. The key used for lookup and update +-operations is a ``struct bpf_lpm_trie_key``, extended by ++operations is a ``struct bpf_lpm_trie_key_u8``, extended by + ``max_prefixlen/8`` bytes. + + - For IPv4 addresses the data length is 4 bytes +diff --git a/Documentation/mm/page_table_check.rst b/Documentation/mm/page_table_check.rst +index c12838ce6b8de2..c59f22eb6a0f9a 100644 +--- a/Documentation/mm/page_table_check.rst ++++ b/Documentation/mm/page_table_check.rst +@@ -14,7 +14,7 @@ Page table check performs extra verifications at the time when new pages become + accessible from the userspace by getting their page table entries (PTEs PMDs + etc.) added into the table. + +-In case of detected corruption, the kernel is crashed. There is a small ++In case of most detected corruption, the kernel is crashed. There is a small + performance and memory overhead associated with the page table check. Therefore, + it is disabled by default, but can be optionally enabled on systems where the + extra hardening outweighs the performance costs. 
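To make the renamed key layout in the map_lpm_trie.rst hunk above concrete: the key is the prefix length followed by max_prefixlen/8 data bytes, so 4 bytes for IPv4. A sketch of the usual wrapper; the struct name and values are illustrative, not from the patch.

#include <linux/types.h>

/* Layout-compatible with struct bpf_lpm_trie_key_u8 for an IPv4 trie */
struct ipv4_lpm_key {
	__u32 prefixlen;	/* up to 32 significant bits */
	__u8  data[4];		/* address bytes, network byte order */
};

struct ipv4_lpm_key key = {
	.prefixlen = 24,
	.data = { 192, 168, 1, 0 },	/* describes 192.168.1.0/24 */
};
/* For updates this inserts the /24 entry; for lookups one would pass the
 * full address with .prefixlen = 32 and get the longest matching prefix. */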
Also, because page table check +@@ -22,6 +22,13 @@ is synchronous, it can help with debugging double map memory corruption issues, + by crashing kernel at the time wrong mapping occurs instead of later which is + often the case with memory corruptions bugs. + ++It can also be used to do page table entry checks over various flags, dump ++warnings when illegal combinations of entry flags are detected. Currently, ++userfaultfd is the only user of such to sanity check wr-protect bit against ++any writable flags. Illegal flag combinations will not directly cause data ++corruption in this case immediately, but that will cause read-only data to ++be writable, leading to corrupt when the page content is later modified. ++ + Double mapping detection logic + ============================== + +diff --git a/Makefile b/Makefile +index 77de99984c2f18..6b967e135c80f0 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 6 +-SUBLEVEL = 46 ++SUBLEVEL = 47 + EXTRAVERSION = + NAME = Hurr durr I'ma ninja sloth + +diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c +index 15aa9bad1c280b..ca0bf0b92ca09e 100644 +--- a/arch/arm64/kvm/hyp/pgtable.c ++++ b/arch/arm64/kvm/hyp/pgtable.c +@@ -523,7 +523,7 @@ static int hyp_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx, + + kvm_clear_pte(ctx->ptep); + dsb(ishst); +- __tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), ctx->level); ++ __tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), 0); + } else { + if (ctx->end - ctx->addr < granule) + return -EINVAL; +@@ -861,9 +861,13 @@ static void stage2_unmap_put_pte(const struct kvm_pgtable_visit_ctx *ctx, + if (kvm_pte_valid(ctx->old)) { + kvm_clear_pte(ctx->ptep); + +- if (!stage2_unmap_defer_tlb_flush(pgt)) +- kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, +- ctx->addr, ctx->level); ++ if (kvm_pte_table(ctx->old, ctx->level)) { ++ kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, ++ 0); ++ } else if (!stage2_unmap_defer_tlb_flush(pgt)) { ++ kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, ++ ctx->level); ++ } + } + + mm_ops->put_page(ctx->ptep); +diff --git a/arch/loongarch/include/uapi/asm/unistd.h b/arch/loongarch/include/uapi/asm/unistd.h +index fcb668984f0336..b344b1f917153b 100644 +--- a/arch/loongarch/include/uapi/asm/unistd.h ++++ b/arch/loongarch/include/uapi/asm/unistd.h +@@ -1,4 +1,5 @@ + /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ ++#define __ARCH_WANT_NEW_STAT + #define __ARCH_WANT_SYS_CLONE + #define __ARCH_WANT_SYS_CLONE3 + +diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h +index e02b179ec65989..d03fe4fb41f43c 100644 +--- a/arch/x86/include/asm/pgtable.h ++++ b/arch/x86/include/asm/pgtable.h +@@ -387,23 +387,7 @@ static inline pte_t pte_wrprotect(pte_t pte) + #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP + static inline int pte_uffd_wp(pte_t pte) + { +- bool wp = pte_flags(pte) & _PAGE_UFFD_WP; +- +-#ifdef CONFIG_DEBUG_VM +- /* +- * Having write bit for wr-protect-marked present ptes is fatal, +- * because it means the uffd-wp bit will be ignored and write will +- * just go through. +- * +- * Use any chance of pgtable walking to verify this (e.g., when +- * page swapped out or being migrated for all purposes). It means +- * something is already wrong. Tell the admin even before the +- * process crashes. We also nail it with wrong pgtable setup. 
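The x86 hunk in progress here drops an open-coded WARN from pte_uffd_wp(), while the page_table_check documentation above gains the userfaultfd wr-protect check; the invariant both refer to fits in one line. A sketch in terms of the generic pte helpers, function name mine:

/* A present pte carrying the uffd-wp marker must never be writable;
 * otherwise the write bit wins and the wr-protect is silently ignored. */
static bool uffd_wp_pte_is_sane(pte_t pte)
{
	return !(pte_present(pte) && pte_uffd_wp(pte) && pte_write(pte));
}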
+- */ +- WARN_ON_ONCE(wp && pte_write(pte)); +-#endif +- +- return wp; ++ return pte_flags(pte) & _PAGE_UFFD_WP; + } + + static inline pte_t pte_mkuffd_wp(pte_t pte) +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c +index 77dbd516a05463..277bf0e8ed0918 100644 +--- a/drivers/ata/libata-scsi.c ++++ b/drivers/ata/libata-scsi.c +@@ -941,8 +941,19 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) + &sense_key, &asc, &ascq); + ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq); + } else { +- /* ATA PASS-THROUGH INFORMATION AVAILABLE */ +- ata_scsi_set_sense(qc->dev, cmd, RECOVERED_ERROR, 0, 0x1D); ++ /* ++ * ATA PASS-THROUGH INFORMATION AVAILABLE ++ * ++ * Note: we are supposed to call ata_scsi_set_sense(), which ++ * respects the D_SENSE bit, instead of unconditionally ++ * generating the sense data in descriptor format. However, ++ * because hdparm, hddtemp, and udisks incorrectly assume sense ++ * data in descriptor format, without even looking at the ++ * RESPONSE CODE field in the returned sense data (to see which ++ * format the returned sense data is in), we are stuck with ++ * being bug compatible with older kernels. ++ */ ++ scsi_build_sense(cmd, 1, RECOVERED_ERROR, 0, 0x1D); + } + } + +diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c +index 2776ca5fc33f39..b215b28cad7b76 100644 +--- a/drivers/isdn/mISDN/socket.c ++++ b/drivers/isdn/mISDN/socket.c +@@ -401,23 +401,23 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) + } + + static int data_sock_setsockopt(struct socket *sock, int level, int optname, +- sockptr_t optval, unsigned int len) ++ sockptr_t optval, unsigned int optlen) + { + struct sock *sk = sock->sk; + int err = 0, opt = 0; + + if (*debug & DEBUG_SOCKET) + printk(KERN_DEBUG "%s(%p, %d, %x, optval, %d)\n", __func__, sock, +- level, optname, len); ++ level, optname, optlen); + + lock_sock(sk); + + switch (optname) { + case MISDN_TIME_STAMP: +- if (copy_from_sockptr(&opt, optval, sizeof(int))) { +- err = -EFAULT; ++ err = copy_safe_from_sockptr(&opt, sizeof(opt), ++ optval, optlen); ++ if (err) + break; +- } + + if (opt) + _pms(sk)->cmask |= MISDN_TIME_STAMP; +diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c +index 22d83ac18eb735..fbf58012becdf2 100644 +--- a/drivers/media/usb/dvb-usb/dvb-usb-init.c ++++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c +@@ -23,40 +23,11 @@ static int dvb_usb_force_pid_filter_usage; + module_param_named(force_pid_filter_usage, dvb_usb_force_pid_filter_usage, int, 0444); + MODULE_PARM_DESC(force_pid_filter_usage, "force all dvb-usb-devices to use a PID filter, if any (default: 0)."); + +-static int dvb_usb_check_bulk_endpoint(struct dvb_usb_device *d, u8 endpoint) +-{ +- if (endpoint) { +- int ret; +- +- ret = usb_pipe_type_check(d->udev, usb_sndbulkpipe(d->udev, endpoint)); +- if (ret) +- return ret; +- ret = usb_pipe_type_check(d->udev, usb_rcvbulkpipe(d->udev, endpoint)); +- if (ret) +- return ret; +- } +- return 0; +-} +- +-static void dvb_usb_clear_halt(struct dvb_usb_device *d, u8 endpoint) +-{ +- if (endpoint) { +- usb_clear_halt(d->udev, usb_sndbulkpipe(d->udev, endpoint)); +- usb_clear_halt(d->udev, usb_rcvbulkpipe(d->udev, endpoint)); +- } +-} +- + static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs) + { + struct dvb_usb_adapter *adap; + int ret, n, o; + +- ret = dvb_usb_check_bulk_endpoint(d, d->props.generic_bulk_ctrl_endpoint); +- if (ret) +- return ret; +- ret = 
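On the mISDN hunk above: copy_safe_from_sockptr() folds in the length validation that the open-coded copy_from_sockptr() call skipped. To the best of my reading the helper behaves like this sketch, so an undersized optlen now yields -EINVAL instead of reading past the user buffer:

static int copy_safe_from_sockptr_sketch(void *dst, size_t ksize,
					 sockptr_t optval, unsigned int optlen)
{
	if (optlen < ksize)		/* caller supplied too few bytes */
		return -EINVAL;
	return copy_from_sockptr(dst, optval, ksize);	/* 0 or -EFAULT */
}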
dvb_usb_check_bulk_endpoint(d, d->props.generic_bulk_ctrl_endpoint_response); +- if (ret) +- return ret; + for (n = 0; n < d->props.num_adapters; n++) { + adap = &d->adapter[n]; + adap->dev = d; +@@ -132,8 +103,10 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs) + * when reloading the driver w/o replugging the device + * sometimes a timeout occurs, this helps + */ +- dvb_usb_clear_halt(d, d->props.generic_bulk_ctrl_endpoint); +- dvb_usb_clear_halt(d, d->props.generic_bulk_ctrl_endpoint_response); ++ if (d->props.generic_bulk_ctrl_endpoint != 0) { ++ usb_clear_halt(d->udev, usb_sndbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint)); ++ usb_clear_halt(d->udev, usb_rcvbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint)); ++ } + + return 0; + +diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c +index ba8b6bd8233cad..96cca4ee470a4b 100644 +--- a/drivers/net/ppp/pppoe.c ++++ b/drivers/net/ppp/pppoe.c +@@ -1007,26 +1007,21 @@ static int pppoe_recvmsg(struct socket *sock, struct msghdr *m, + struct sk_buff *skb; + int error = 0; + +- if (sk->sk_state & PPPOX_BOUND) { +- error = -EIO; +- goto end; +- } ++ if (sk->sk_state & PPPOX_BOUND) ++ return -EIO; + + skb = skb_recv_datagram(sk, flags, &error); +- if (error < 0) +- goto end; ++ if (!skb) ++ return error; + +- if (skb) { +- total_len = min_t(size_t, total_len, skb->len); +- error = skb_copy_datagram_msg(skb, 0, m, total_len); +- if (error == 0) { +- consume_skb(skb); +- return total_len; +- } ++ total_len = min_t(size_t, total_len, skb->len); ++ error = skb_copy_datagram_msg(skb, 0, m, total_len); ++ if (error == 0) { ++ consume_skb(skb); ++ return total_len; + } + + kfree_skb(skb); +-end: + return error; + } + +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c +index 0fc7aa78b2e5b9..2c3f55877a1134 100644 +--- a/drivers/nvme/host/pci.c ++++ b/drivers/nvme/host/pci.c +@@ -2931,6 +2931,13 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) + return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND; + } + ++ /* ++ * NVMe SSD drops off the PCIe bus after system idle ++ * for 10 hours on a Lenovo N60z board. 
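The reshaped pppoe_recvmsg() above leans on the skb_recv_datagram() contract: on failure it returns NULL and stores a negative errno through its last argument, which is why a plain NULL test can replace the old error check. In sketch form:

skb = skb_recv_datagram(sk, flags, &error);
if (!skb)			/* error holds -EAGAIN, -EINTR, ... */
	return error;
/* from here on the skb is valid and must be consumed or freed */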
++ */ ++ if (dmi_match(DMI_BOARD_NAME, "LXKT-ZXEG-N6")) ++ return NVME_QUIRK_NO_APST; ++ + return 0; + } + +diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c +index c26545d71d39a3..cd6d5bbb4b9df5 100644 +--- a/fs/binfmt_flat.c ++++ b/fs/binfmt_flat.c +@@ -72,8 +72,10 @@ + + #ifdef CONFIG_BINFMT_FLAT_NO_DATA_START_OFFSET + #define DATA_START_OFFSET_WORDS (0) ++#define MAX_SHARED_LIBS_UPDATE (0) + #else + #define DATA_START_OFFSET_WORDS (MAX_SHARED_LIBS) ++#define MAX_SHARED_LIBS_UPDATE (MAX_SHARED_LIBS) + #endif + + struct lib_info { +@@ -880,7 +882,7 @@ static int load_flat_binary(struct linux_binprm *bprm) + return res; + + /* Update data segment pointers for all libraries */ +- for (i = 0; i < MAX_SHARED_LIBS; i++) { ++ for (i = 0; i < MAX_SHARED_LIBS_UPDATE; i++) { + if (!libinfo.lib_list[i].loaded) + continue; + for (j = 0; j < MAX_SHARED_LIBS; j++) { +diff --git a/fs/buffer.c b/fs/buffer.c +index 12e9a71c693d74..ecd8b47507ff80 100644 +--- a/fs/buffer.c ++++ b/fs/buffer.c +@@ -2179,6 +2179,8 @@ static void __block_commit_write(struct folio *folio, size_t from, size_t to) + struct buffer_head *bh, *head; + + bh = head = folio_buffers(folio); ++ if (!bh) ++ return; + blocksize = bh->b_size; + + block_start = 0; +diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c +index 5ee7d7bbb361ce..2fbf97077ce910 100644 +--- a/fs/cramfs/inode.c ++++ b/fs/cramfs/inode.c +@@ -495,7 +495,7 @@ static void cramfs_kill_sb(struct super_block *sb) + sb->s_mtd = NULL; + } else if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV) && sb->s_bdev) { + sync_blockdev(sb->s_bdev); +- blkdev_put(sb->s_bdev, sb); ++ bdev_release(sb->s_bdev_handle); + } + kfree(sbi); + } +diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c +index d36b3963c0bf3c..aa59788a61e6e4 100644 +--- a/fs/erofs/decompressor.c ++++ b/fs/erofs/decompressor.c +@@ -248,15 +248,9 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx, + if (ret != rq->outputsize) { + erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]", + ret, rq->inputsize, inputmargin, rq->outputsize); +- +- print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET, +- 16, 1, src + inputmargin, rq->inputsize, true); +- print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET, +- 16, 1, out, rq->outputsize, true); +- + if (ret >= 0) + memset(out + ret, 0, rq->outputsize - ret); +- ret = -EIO; ++ ret = -EFSCORRUPTED; + } else { + ret = 0; + } +diff --git a/fs/exec.c b/fs/exec.c +index 89a9017af7e86f..1cbbef281f8cfe 100644 +--- a/fs/exec.c ++++ b/fs/exec.c +@@ -1609,6 +1609,7 @@ static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file) + unsigned int mode; + vfsuid_t vfsuid; + vfsgid_t vfsgid; ++ int err; + + if (!mnt_may_suid(file->f_path.mnt)) + return; +@@ -1625,12 +1626,17 @@ static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file) + /* Be careful if suid/sgid is set */ + inode_lock(inode); + +- /* reload atomically mode/uid/gid now that lock held */ ++ /* Atomically reload and check mode/uid/gid now that lock held. */ + mode = inode->i_mode; + vfsuid = i_uid_into_vfsuid(idmap, inode); + vfsgid = i_gid_into_vfsgid(idmap, inode); ++ err = inode_permission(idmap, inode, MAY_EXEC); + inode_unlock(inode); + ++ /* Did the exec bit vanish out from under us? Give up. 
*/ ++ if (err) ++ return; ++ + /* We ignore suid/sgid if there are no mappings for them in the ns */ + if (!vfsuid_has_mapping(bprm->cred->user_ns, vfsuid) || + !vfsgid_has_mapping(bprm->cred->user_ns, vfsgid)) +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index cef119a2476bb4..a4ffd1acac6514 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -2966,23 +2966,29 @@ static int ext4_da_should_update_i_disksize(struct folio *folio, + + static int ext4_da_do_write_end(struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, +- struct page *page) ++ struct folio *folio) + { + struct inode *inode = mapping->host; + loff_t old_size = inode->i_size; + bool disksize_changed = false; + loff_t new_i_size; + ++ if (unlikely(!folio_buffers(folio))) { ++ folio_unlock(folio); ++ folio_put(folio); ++ return -EIO; ++ } + /* + * block_write_end() will mark the inode as dirty with I_DIRTY_PAGES + * flag, which all that's needed to trigger page writeback. + */ +- copied = block_write_end(NULL, mapping, pos, len, copied, page, NULL); ++ copied = block_write_end(NULL, mapping, pos, len, copied, ++ &folio->page, NULL); + new_i_size = pos + copied; + + /* +- * It's important to update i_size while still holding page lock, +- * because page writeout could otherwise come in and zero beyond ++ * It's important to update i_size while still holding folio lock, ++ * because folio writeout could otherwise come in and zero beyond + * i_size. + * + * Since we are holding inode lock, we are sure i_disksize <= +@@ -3000,14 +3006,14 @@ static int ext4_da_do_write_end(struct address_space *mapping, + + i_size_write(inode, new_i_size); + end = (new_i_size - 1) & (PAGE_SIZE - 1); +- if (copied && ext4_da_should_update_i_disksize(page_folio(page), end)) { ++ if (copied && ext4_da_should_update_i_disksize(folio, end)) { + ext4_update_i_disksize(inode, new_i_size); + disksize_changed = true; + } + } + +- unlock_page(page); +- put_page(page); ++ folio_unlock(folio); ++ folio_put(folio); + + if (old_size < pos) + pagecache_isize_extended(inode, old_size, pos); +@@ -3046,10 +3052,10 @@ static int ext4_da_write_end(struct file *file, + return ext4_write_inline_data_end(inode, pos, len, copied, + folio); + +- if (unlikely(copied < len) && !PageUptodate(page)) ++ if (unlikely(copied < len) && !folio_test_uptodate(folio)) + copied = 0; + +- return ext4_da_do_write_end(mapping, pos, len, copied, &folio->page); ++ return ext4_da_do_write_end(mapping, pos, len, copied, folio); + } + + /* +diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c +index c58cbe9f7809c1..c368ff671d7739 100644 +--- a/fs/ext4/xattr.c ++++ b/fs/ext4/xattr.c +@@ -1571,46 +1571,49 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value, + /* + * Add value of the EA in an inode. + */ +-static int ext4_xattr_inode_lookup_create(handle_t *handle, struct inode *inode, +- const void *value, size_t value_len, +- struct inode **ret_inode) ++static struct inode *ext4_xattr_inode_lookup_create(handle_t *handle, ++ struct inode *inode, const void *value, size_t value_len) + { + struct inode *ea_inode; + u32 hash; + int err; + ++ /* Account inode & space to quota even if sharing... 
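A quick key for the page-to-folio conversion in ext4_da_do_write_end() above, since the hunk mixes both APIs; these are the standard equivalences, and &folio->page survives only because block_write_end() still takes a struct page:

/*
 *   PageUptodate(page)  ->  folio_test_uptodate(folio)
 *   unlock_page(page)   ->  folio_unlock(folio)
 *   put_page(page)      ->  folio_put(folio)
 *   page_buffers(page)  ->  folio_buffers(folio)  (may return NULL,
 *                           hence the new -EIO guard at the top)
 */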
*/ ++ err = ext4_xattr_inode_alloc_quota(inode, value_len); ++ if (err) ++ return ERR_PTR(err); ++ + hash = ext4_xattr_inode_hash(EXT4_SB(inode->i_sb), value, value_len); + ea_inode = ext4_xattr_inode_cache_find(inode, value, value_len, hash); + if (ea_inode) { + err = ext4_xattr_inode_inc_ref(handle, ea_inode); +- if (err) { +- iput(ea_inode); +- return err; +- } +- +- *ret_inode = ea_inode; +- return 0; ++ if (err) ++ goto out_err; ++ return ea_inode; + } + + /* Create an inode for the EA value */ + ea_inode = ext4_xattr_inode_create(handle, inode, hash); +- if (IS_ERR(ea_inode)) +- return PTR_ERR(ea_inode); ++ if (IS_ERR(ea_inode)) { ++ ext4_xattr_inode_free_quota(inode, NULL, value_len); ++ return ea_inode; ++ } + + err = ext4_xattr_inode_write(handle, ea_inode, value, value_len); + if (err) { + if (ext4_xattr_inode_dec_ref(handle, ea_inode)) + ext4_warning_inode(ea_inode, "cleanup dec ref error %d", err); +- iput(ea_inode); +- return err; ++ goto out_err; + } + + if (EA_INODE_CACHE(inode)) + mb_cache_entry_create(EA_INODE_CACHE(inode), GFP_NOFS, hash, + ea_inode->i_ino, true /* reusable */); +- +- *ret_inode = ea_inode; +- return 0; ++ return ea_inode; ++out_err: ++ iput(ea_inode); ++ ext4_xattr_inode_free_quota(inode, NULL, value_len); ++ return ERR_PTR(err); + } + + /* +@@ -1622,6 +1625,7 @@ static int ext4_xattr_inode_lookup_create(handle_t *handle, struct inode *inode, + static int ext4_xattr_set_entry(struct ext4_xattr_info *i, + struct ext4_xattr_search *s, + handle_t *handle, struct inode *inode, ++ struct inode *new_ea_inode, + bool is_block) + { + struct ext4_xattr_entry *last, *next; +@@ -1629,7 +1633,6 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i, + size_t min_offs = s->end - s->base, name_len = strlen(i->name); + int in_inode = i->in_inode; + struct inode *old_ea_inode = NULL; +- struct inode *new_ea_inode = NULL; + size_t old_size, new_size; + int ret; + +@@ -1714,43 +1717,11 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i, + old_ea_inode = NULL; + goto out; + } +- } +- if (i->value && in_inode) { +- WARN_ON_ONCE(!i->value_len); +- +- ret = ext4_xattr_inode_alloc_quota(inode, i->value_len); +- if (ret) +- goto out; +- +- ret = ext4_xattr_inode_lookup_create(handle, inode, i->value, +- i->value_len, +- &new_ea_inode); +- if (ret) { +- new_ea_inode = NULL; +- ext4_xattr_inode_free_quota(inode, NULL, i->value_len); +- goto out; +- } +- } + +- if (old_ea_inode) { + /* We are ready to release ref count on the old_ea_inode. */ + ret = ext4_xattr_inode_dec_ref(handle, old_ea_inode); +- if (ret) { +- /* Release newly required ref count on new_ea_inode. 
*/ +- if (new_ea_inode) { +- int err; +- +- err = ext4_xattr_inode_dec_ref(handle, +- new_ea_inode); +- if (err) +- ext4_warning_inode(new_ea_inode, +- "dec ref new_ea_inode err=%d", +- err); +- ext4_xattr_inode_free_quota(inode, new_ea_inode, +- i->value_len); +- } ++ if (ret) + goto out; +- } + + ext4_xattr_inode_free_quota(inode, old_ea_inode, + le32_to_cpu(here->e_value_size)); +@@ -1874,7 +1845,6 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i, + ret = 0; + out: + iput(old_ea_inode); +- iput(new_ea_inode); + return ret; + } + +@@ -1937,9 +1907,21 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode, + size_t old_ea_inode_quota = 0; + unsigned int ea_ino; + +- + #define header(x) ((struct ext4_xattr_header *)(x)) + ++ /* If we need EA inode, prepare it before locking the buffer */ ++ if (i->value && i->in_inode) { ++ WARN_ON_ONCE(!i->value_len); ++ ++ ea_inode = ext4_xattr_inode_lookup_create(handle, inode, ++ i->value, i->value_len); ++ if (IS_ERR(ea_inode)) { ++ error = PTR_ERR(ea_inode); ++ ea_inode = NULL; ++ goto cleanup; ++ } ++ } ++ + if (s->base) { + int offset = (char *)s->here - bs->bh->b_data; + +@@ -1948,6 +1930,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode, + EXT4_JTR_NONE); + if (error) + goto cleanup; ++ + lock_buffer(bs->bh); + + if (header(s->base)->h_refcount == cpu_to_le32(1)) { +@@ -1974,7 +1957,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode, + } + ea_bdebug(bs->bh, "modifying in-place"); + error = ext4_xattr_set_entry(i, s, handle, inode, +- true /* is_block */); ++ ea_inode, true /* is_block */); + ext4_xattr_block_csum_set(inode, bs->bh); + unlock_buffer(bs->bh); + if (error == -EFSCORRUPTED) +@@ -2042,29 +2025,13 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode, + s->end = s->base + sb->s_blocksize; + } + +- error = ext4_xattr_set_entry(i, s, handle, inode, true /* is_block */); ++ error = ext4_xattr_set_entry(i, s, handle, inode, ea_inode, ++ true /* is_block */); + if (error == -EFSCORRUPTED) + goto bad_block; + if (error) + goto cleanup; + +- if (i->value && s->here->e_value_inum) { +- /* +- * A ref count on ea_inode has been taken as part of the call to +- * ext4_xattr_set_entry() above. We would like to drop this +- * extra ref but we have to wait until the xattr block is +- * initialized and has its own ref count on the ea_inode. +- */ +- ea_ino = le32_to_cpu(s->here->e_value_inum); +- error = ext4_xattr_inode_iget(inode, ea_ino, +- le32_to_cpu(s->here->e_hash), +- &ea_inode); +- if (error) { +- ea_inode = NULL; +- goto cleanup; +- } +- } +- + inserted: + if (!IS_LAST_ENTRY(s->first)) { + new_bh = ext4_xattr_block_cache_find(inode, header(s->base), +@@ -2217,17 +2184,16 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode, + + cleanup: + if (ea_inode) { +- int error2; +- +- error2 = ext4_xattr_inode_dec_ref(handle, ea_inode); +- if (error2) +- ext4_warning_inode(ea_inode, "dec ref error=%d", +- error2); ++ if (error) { ++ int error2; + +- /* If there was an error, revert the quota charge. 
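The reworked ext4_xattr_inode_lookup_create() above now returns the EA inode or an ERR_PTR, with the quota charge taken up front and rolled back internally on failure, so its callers shrink to the usual pattern. A condensed caller-side sketch:

ea_inode = ext4_xattr_inode_lookup_create(handle, inode,
					   i->value, i->value_len);
if (IS_ERR(ea_inode))
	return PTR_ERR(ea_inode);	/* quota already released inside */
/* ... on a later failure the caller drops its own reference and the
 * quota charge itself, as the cleanup paths in these hunks do. */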
*/ +- if (error) ++ error2 = ext4_xattr_inode_dec_ref(handle, ea_inode); ++ if (error2) ++ ext4_warning_inode(ea_inode, "dec ref error=%d", ++ error2); + ext4_xattr_inode_free_quota(inode, ea_inode, + i_size_read(ea_inode)); ++ } + iput(ea_inode); + } + if (ce) +@@ -2285,14 +2251,38 @@ int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode, + { + struct ext4_xattr_ibody_header *header; + struct ext4_xattr_search *s = &is->s; ++ struct inode *ea_inode = NULL; + int error; + + if (!EXT4_INODE_HAS_XATTR_SPACE(inode)) + return -ENOSPC; + +- error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */); +- if (error) ++ /* If we need EA inode, prepare it before locking the buffer */ ++ if (i->value && i->in_inode) { ++ WARN_ON_ONCE(!i->value_len); ++ ++ ea_inode = ext4_xattr_inode_lookup_create(handle, inode, ++ i->value, i->value_len); ++ if (IS_ERR(ea_inode)) ++ return PTR_ERR(ea_inode); ++ } ++ error = ext4_xattr_set_entry(i, s, handle, inode, ea_inode, ++ false /* is_block */); ++ if (error) { ++ if (ea_inode) { ++ int error2; ++ ++ error2 = ext4_xattr_inode_dec_ref(handle, ea_inode); ++ if (error2) ++ ext4_warning_inode(ea_inode, "dec ref error=%d", ++ error2); ++ ++ ext4_xattr_inode_free_quota(inode, ea_inode, ++ i_size_read(ea_inode)); ++ iput(ea_inode); ++ } + return error; ++ } + header = IHDR(inode, ext4_raw_inode(&is->iloc)); + if (!IS_LAST_ENTRY(s->first)) { + header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC); +@@ -2301,6 +2291,7 @@ int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode, + header->h_magic = cpu_to_le32(0); + ext4_clear_inode_state(inode, EXT4_STATE_XATTR); + } ++ iput(ea_inode); + return 0; + } + +diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c +index ad8dfac73bd446..6a9a470345bfc7 100644 +--- a/fs/f2fs/extent_cache.c ++++ b/fs/f2fs/extent_cache.c +@@ -19,34 +19,24 @@ + #include "node.h" + #include + +-bool sanity_check_extent_cache(struct inode *inode) ++bool sanity_check_extent_cache(struct inode *inode, struct page *ipage) + { + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); +- struct f2fs_inode_info *fi = F2FS_I(inode); +- struct extent_tree *et = fi->extent_tree[EX_READ]; +- struct extent_info *ei; +- +- if (!et) +- return true; ++ struct f2fs_extent *i_ext = &F2FS_INODE(ipage)->i_ext; ++ struct extent_info ei; + +- ei = &et->largest; +- if (!ei->len) +- return true; ++ get_read_extent_info(&ei, i_ext); + +- /* Let's drop, if checkpoint got corrupted. 
*/ +- if (is_set_ckpt_flags(sbi, CP_ERROR_FLAG)) { +- ei->len = 0; +- et->largest_updated = true; ++ if (!ei.len) + return true; +- } + +- if (!f2fs_is_valid_blkaddr(sbi, ei->blk, DATA_GENERIC_ENHANCE) || +- !f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1, ++ if (!f2fs_is_valid_blkaddr(sbi, ei.blk, DATA_GENERIC_ENHANCE) || ++ !f2fs_is_valid_blkaddr(sbi, ei.blk + ei.len - 1, + DATA_GENERIC_ENHANCE)) { + set_sbi_flag(sbi, SBI_NEED_FSCK); + f2fs_warn(sbi, "%s: inode (ino=%lx) extent info [%u, %u, %u] is incorrect, run fsck to fix", + __func__, inode->i_ino, +- ei->blk, ei->fofs, ei->len); ++ ei.blk, ei.fofs, ei.len); + return false; + } + return true; +@@ -395,24 +385,22 @@ void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage) + + if (!__may_extent_tree(inode, EX_READ)) { + /* drop largest read extent */ +- if (i_ext && i_ext->len) { ++ if (i_ext->len) { + f2fs_wait_on_page_writeback(ipage, NODE, true, true); + i_ext->len = 0; + set_page_dirty(ipage); + } +- goto out; ++ set_inode_flag(inode, FI_NO_EXTENT); ++ return; + } + + et = __grab_extent_tree(inode, EX_READ); + +- if (!i_ext || !i_ext->len) +- goto out; +- + get_read_extent_info(&ei, i_ext); + + write_lock(&et->lock); +- if (atomic_read(&et->node_cnt)) +- goto unlock_out; ++ if (atomic_read(&et->node_cnt) || !ei.len) ++ goto skip; + + en = __attach_extent_node(sbi, et, &ei, NULL, + &et->root.rb_root.rb_node, true); +@@ -424,11 +412,13 @@ void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage) + list_add_tail(&en->list, &eti->extent_list); + spin_unlock(&eti->extent_lock); + } +-unlock_out: ++skip: ++ /* Let's drop, if checkpoint got corrupted. */ ++ if (f2fs_cp_error(sbi)) { ++ et->largest.len = 0; ++ et->largest_updated = true; ++ } + write_unlock(&et->lock); +-out: +- if (!F2FS_I(inode)->extent_tree[EX_READ]) +- set_inode_flag(inode, FI_NO_EXTENT); + } + + void f2fs_init_age_extent_tree(struct inode *inode) +diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h +index 19490dd8321943..00eff023cd9d63 100644 +--- a/fs/f2fs/f2fs.h ++++ b/fs/f2fs/f2fs.h +@@ -4189,7 +4189,7 @@ void f2fs_leave_shrinker(struct f2fs_sb_info *sbi); + /* + * extent_cache.c + */ +-bool sanity_check_extent_cache(struct inode *inode); ++bool sanity_check_extent_cache(struct inode *inode, struct page *ipage); + void f2fs_init_extent_tree(struct inode *inode); + void f2fs_drop_extent_tree(struct inode *inode); + void f2fs_destroy_extent_node(struct inode *inode); +diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c +index afb7c88ba06b2c..888c301ffe8f4c 100644 +--- a/fs/f2fs/gc.c ++++ b/fs/f2fs/gc.c +@@ -1563,6 +1563,16 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, + continue; + } + ++ if (f2fs_has_inline_data(inode)) { ++ iput(inode); ++ set_sbi_flag(sbi, SBI_NEED_FSCK); ++ f2fs_err_ratelimited(sbi, ++ "inode %lx has both inline_data flag and " ++ "data block, nid=%u, ofs_in_node=%u", ++ inode->i_ino, dni.nid, ofs_in_node); ++ continue; ++ } ++ + err = f2fs_gc_pinned_control(inode, gc_type, segno); + if (err == -EAGAIN) { + iput(inode); +diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c +index 0172f4e503061d..26e857fee631d9 100644 +--- a/fs/f2fs/inode.c ++++ b/fs/f2fs/inode.c +@@ -511,16 +511,16 @@ static int do_read_inode(struct inode *inode) + + init_idisk_time(inode); + +- /* Need all the flag bits */ +- f2fs_init_read_extent_tree(inode, node_page); +- f2fs_init_age_extent_tree(inode); +- +- if (!sanity_check_extent_cache(inode)) { ++ if (!sanity_check_extent_cache(inode, node_page)) { + f2fs_put_page(node_page, 
1); + f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE); + return -EFSCORRUPTED; + } + ++ /* Need all the flag bits */ ++ f2fs_init_read_extent_tree(inode, node_page); ++ f2fs_init_age_extent_tree(inode); ++ + f2fs_put_page(node_page, 1); + + stat_inc_inline_xattr(inode); +diff --git a/fs/fhandle.c b/fs/fhandle.c +index 99dcf07cfecfe1..c361d7ff1b88dd 100644 +--- a/fs/fhandle.c ++++ b/fs/fhandle.c +@@ -40,7 +40,7 @@ static long do_sys_name_to_handle(const struct path *path, + if (f_handle.handle_bytes > MAX_HANDLE_SZ) + return -EINVAL; + +- handle = kzalloc(sizeof(struct file_handle) + f_handle.handle_bytes, ++ handle = kzalloc(struct_size(handle, f_handle, f_handle.handle_bytes), + GFP_KERNEL); + if (!handle) + return -ENOMEM; +@@ -75,7 +75,7 @@ static long do_sys_name_to_handle(const struct path *path, + /* copy the mount id */ + if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) || + copy_to_user(ufh, handle, +- sizeof(struct file_handle) + handle_bytes)) ++ struct_size(handle, f_handle, handle_bytes))) + retval = -EFAULT; + kfree(handle); + return retval; +@@ -196,7 +196,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh, + retval = -EINVAL; + goto out_err; + } +- handle = kmalloc(sizeof(struct file_handle) + f_handle.handle_bytes, ++ handle = kmalloc(struct_size(handle, f_handle, f_handle.handle_bytes), + GFP_KERNEL); + if (!handle) { + retval = -ENOMEM; +diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c +index cb3cda1390adb1..5713994328cbcb 100644 +--- a/fs/jfs/jfs_dmap.c ++++ b/fs/jfs/jfs_dmap.c +@@ -1626,6 +1626,8 @@ s64 dbDiscardAG(struct inode *ip, int agno, s64 minlen) + } else if (rc == -ENOSPC) { + /* search for next smaller log2 block */ + l2nb = BLKSTOL2(nblocks) - 1; ++ if (unlikely(l2nb < 0)) ++ break; + nblocks = 1LL << l2nb; + } else { + /* Trim any already allocated blocks */ +diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c +index 031d8f570f581f..5d3127ca68a42d 100644 +--- a/fs/jfs/jfs_dtree.c ++++ b/fs/jfs/jfs_dtree.c +@@ -834,6 +834,8 @@ int dtInsert(tid_t tid, struct inode *ip, + * the full page. 
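On the fs/fhandle.c hunks above: struct_size() replaces the open-coded sizeof addition. Assuming the usual flexible-array layout of struct file_handle, the macro computes the same size but saturates on overflow, so an absurd handle_bytes makes the allocation fail instead of wrapping to a short buffer:

struct file_handle {
	__u32 handle_bytes;
	int handle_type;
	unsigned char f_handle[];	/* flexible array member */
};

/* struct_size(handle, f_handle, n)
 *	== sizeof(*handle) + n * sizeof(handle->f_handle[0]),
 * clamped to SIZE_MAX on overflow, which kzalloc() then rejects. */
handle = kzalloc(struct_size(handle, f_handle, f_handle.handle_bytes),
		 GFP_KERNEL);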
+ */ + DT_GETSEARCH(ip, btstack->top, bn, mp, p, index); ++ if (p->header.freelist == 0) ++ return -EINVAL; + + /* + * insert entry for new key +diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c +index e855b8fde76ce1..cb6d1fda66a702 100644 +--- a/fs/jfs/jfs_logmgr.c ++++ b/fs/jfs/jfs_logmgr.c +@@ -1058,7 +1058,7 @@ void jfs_syncpt(struct jfs_log *log, int hard_sync) + int lmLogOpen(struct super_block *sb) + { + int rc; +- struct block_device *bdev; ++ struct bdev_handle *bdev_handle; + struct jfs_log *log; + struct jfs_sb_info *sbi = JFS_SBI(sb); + +@@ -1070,7 +1070,7 @@ int lmLogOpen(struct super_block *sb) + + mutex_lock(&jfs_log_mutex); + list_for_each_entry(log, &jfs_external_logs, journal_list) { +- if (log->bdev->bd_dev == sbi->logdev) { ++ if (log->bdev_handle->bdev->bd_dev == sbi->logdev) { + if (!uuid_equal(&log->uuid, &sbi->loguuid)) { + jfs_warn("wrong uuid on JFS journal"); + mutex_unlock(&jfs_log_mutex); +@@ -1100,14 +1100,14 @@ int lmLogOpen(struct super_block *sb) + * file systems to log may have n-to-1 relationship; + */ + +- bdev = blkdev_get_by_dev(sbi->logdev, BLK_OPEN_READ | BLK_OPEN_WRITE, +- log, NULL); +- if (IS_ERR(bdev)) { +- rc = PTR_ERR(bdev); ++ bdev_handle = bdev_open_by_dev(sbi->logdev, ++ BLK_OPEN_READ | BLK_OPEN_WRITE, log, NULL); ++ if (IS_ERR(bdev_handle)) { ++ rc = PTR_ERR(bdev_handle); + goto free; + } + +- log->bdev = bdev; ++ log->bdev_handle = bdev_handle; + uuid_copy(&log->uuid, &sbi->loguuid); + + /* +@@ -1141,7 +1141,7 @@ int lmLogOpen(struct super_block *sb) + lbmLogShutdown(log); + + close: /* close external log device */ +- blkdev_put(bdev, log); ++ bdev_release(bdev_handle); + + free: /* free log descriptor */ + mutex_unlock(&jfs_log_mutex); +@@ -1162,7 +1162,7 @@ static int open_inline_log(struct super_block *sb) + init_waitqueue_head(&log->syncwait); + + set_bit(log_INLINELOG, &log->flag); +- log->bdev = sb->s_bdev; ++ log->bdev_handle = sb->s_bdev_handle; + log->base = addressPXD(&JFS_SBI(sb)->logpxd); + log->size = lengthPXD(&JFS_SBI(sb)->logpxd) >> + (L2LOGPSIZE - sb->s_blocksize_bits); +@@ -1436,7 +1436,7 @@ int lmLogClose(struct super_block *sb) + { + struct jfs_sb_info *sbi = JFS_SBI(sb); + struct jfs_log *log = sbi->log; +- struct block_device *bdev; ++ struct bdev_handle *bdev_handle; + int rc = 0; + + jfs_info("lmLogClose: log:0x%p", log); +@@ -1482,10 +1482,10 @@ int lmLogClose(struct super_block *sb) + * external log as separate logical volume + */ + list_del(&log->journal_list); +- bdev = log->bdev; ++ bdev_handle = log->bdev_handle; + rc = lmLogShutdown(log); + +- blkdev_put(bdev, log); ++ bdev_release(bdev_handle); + + kfree(log); + +@@ -1972,7 +1972,7 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp) + + bp->l_flag |= lbmREAD; + +- bio = bio_alloc(log->bdev, 1, REQ_OP_READ, GFP_NOFS); ++ bio = bio_alloc(log->bdev_handle->bdev, 1, REQ_OP_READ, GFP_NOFS); + bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); + __bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset); + BUG_ON(bio->bi_iter.bi_size != LOGPSIZE); +@@ -2110,10 +2110,15 @@ static void lbmStartIO(struct lbuf * bp) + { + struct bio *bio; + struct jfs_log *log = bp->l_log; ++ struct block_device *bdev = NULL; + + jfs_info("lbmStartIO"); + +- bio = bio_alloc(log->bdev, 1, REQ_OP_WRITE | REQ_SYNC, GFP_NOFS); ++ if (!log->no_integrity) ++ bdev = log->bdev_handle->bdev; ++ ++ bio = bio_alloc(bdev, 1, REQ_OP_WRITE | REQ_SYNC, ++ GFP_NOFS); + bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); + __bio_add_page(bio, bp->l_page, 
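The jfs log manager hunks above and below track the block layer's move from blkdev_get_by_dev()/blkdev_put() to handle-based opens. Condensed from the diff itself, with holder details elided, the new shape is:

struct bdev_handle *h;

h = bdev_open_by_dev(devt, BLK_OPEN_READ | BLK_OPEN_WRITE,
		     holder, NULL /* no holder ops */);
if (IS_ERR(h))
	return PTR_ERR(h);

/* the block_device itself now lives behind the handle */
bio = bio_alloc(h->bdev, 1, REQ_OP_READ, GFP_NOFS);

bdev_release(h);	/* pairs with bdev_open_by_dev() */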
LOGPSIZE, bp->l_offset); + BUG_ON(bio->bi_iter.bi_size != LOGPSIZE); +diff --git a/fs/jfs/jfs_logmgr.h b/fs/jfs/jfs_logmgr.h +index 805877ce502044..84aa2d25390743 100644 +--- a/fs/jfs/jfs_logmgr.h ++++ b/fs/jfs/jfs_logmgr.h +@@ -356,7 +356,7 @@ struct jfs_log { + * before writing syncpt. + */ + struct list_head journal_list; /* Global list */ +- struct block_device *bdev; /* 4: log lv pointer */ ++ struct bdev_handle *bdev_handle; /* 4: log lv pointer */ + int serial; /* 4: log mount serial number */ + + s64 base; /* @8: log extent address (inline log ) */ +diff --git a/fs/jfs/jfs_mount.c b/fs/jfs/jfs_mount.c +index 631b8bd3e43849..9b5c6a20b30c83 100644 +--- a/fs/jfs/jfs_mount.c ++++ b/fs/jfs/jfs_mount.c +@@ -430,7 +430,8 @@ int updateSuper(struct super_block *sb, uint state) + + if (state == FM_MOUNT) { + /* record log's dev_t and mount serial number */ +- j_sb->s_logdev = cpu_to_le32(new_encode_dev(sbi->log->bdev->bd_dev)); ++ j_sb->s_logdev = cpu_to_le32( ++ new_encode_dev(sbi->log->bdev_handle->bdev->bd_dev)); + j_sb->s_logserial = cpu_to_le32(sbi->log->serial); + } else if (state == FM_CLEAN) { + /* +diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c +index 6579948070a482..a62331487ebf16 100644 +--- a/fs/lockd/svc.c ++++ b/fs/lockd/svc.c +@@ -712,8 +712,6 @@ static const struct svc_version *nlmsvc_version[] = { + #endif + }; + +-static struct svc_stat nlmsvc_stats; +- + #define NLM_NRVERS ARRAY_SIZE(nlmsvc_version) + static struct svc_program nlmsvc_program = { + .pg_prog = NLM_PROGRAM, /* program number */ +@@ -721,7 +719,6 @@ static struct svc_program nlmsvc_program = { + .pg_vers = nlmsvc_version, /* version table */ + .pg_name = "lockd", /* service name */ + .pg_class = "nfsd", /* share authentication with nfsd */ +- .pg_stats = &nlmsvc_stats, /* stats table */ + .pg_authenticate = &lockd_authenticate, /* export authentication */ + .pg_init_request = svc_generic_init_request, + .pg_rpcbind_set = svc_generic_rpcbind_set, +diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c +index 466ebf1d41b2b7..869c88978899c0 100644 +--- a/fs/nfs/callback.c ++++ b/fs/nfs/callback.c +@@ -399,15 +399,12 @@ static const struct svc_version *nfs4_callback_version[] = { + [4] = &nfs4_callback_version4, + }; + +-static struct svc_stat nfs4_callback_stats; +- + static struct svc_program nfs4_callback_program = { + .pg_prog = NFS4_CALLBACK, /* RPC service number */ + .pg_nvers = ARRAY_SIZE(nfs4_callback_version), /* Number of entries */ + .pg_vers = nfs4_callback_version, /* version table */ + .pg_name = "NFSv4 callback", /* service name */ + .pg_class = "nfs", /* authentication class */ +- .pg_stats = &nfs4_callback_stats, + .pg_authenticate = nfs_callback_authenticate, + .pg_init_request = svc_generic_init_request, + .pg_rpcbind_set = svc_generic_rpcbind_set, +diff --git a/fs/nfsd/cache.h b/fs/nfsd/cache.h +index 4cbe0434cbb8ce..66a05fefae98ea 100644 +--- a/fs/nfsd/cache.h ++++ b/fs/nfsd/cache.h +@@ -80,8 +80,6 @@ enum { + + int nfsd_drc_slab_create(void); + void nfsd_drc_slab_free(void); +-int nfsd_net_reply_cache_init(struct nfsd_net *nn); +-void nfsd_net_reply_cache_destroy(struct nfsd_net *nn); + int nfsd_reply_cache_init(struct nfsd_net *); + void nfsd_reply_cache_shutdown(struct nfsd_net *); + int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start, +diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c +index 11a0eaa2f91407..b7da17e530077e 100644 +--- a/fs/nfsd/export.c ++++ b/fs/nfsd/export.c +@@ -339,12 +339,16 @@ static int export_stats_init(struct export_stats *stats) + + static void 
export_stats_reset(struct export_stats *stats) + { +- nfsd_percpu_counters_reset(stats->counter, EXP_STATS_COUNTERS_NUM); ++ if (stats) ++ nfsd_percpu_counters_reset(stats->counter, ++ EXP_STATS_COUNTERS_NUM); + } + + static void export_stats_destroy(struct export_stats *stats) + { +- nfsd_percpu_counters_destroy(stats->counter, EXP_STATS_COUNTERS_NUM); ++ if (stats) ++ nfsd_percpu_counters_destroy(stats->counter, ++ EXP_STATS_COUNTERS_NUM); + } + + static void svc_export_put(struct kref *ref) +@@ -353,7 +357,8 @@ static void svc_export_put(struct kref *ref) + path_put(&exp->ex_path); + auth_domain_put(exp->ex_client); + nfsd4_fslocs_free(&exp->ex_fslocs); +- export_stats_destroy(&exp->ex_stats); ++ export_stats_destroy(exp->ex_stats); ++ kfree(exp->ex_stats); + kfree(exp->ex_uuid); + kfree_rcu(exp, ex_rcu); + } +@@ -767,13 +772,15 @@ static int svc_export_show(struct seq_file *m, + seq_putc(m, '\t'); + seq_escape(m, exp->ex_client->name, " \t\n\\"); + if (export_stats) { +- seq_printf(m, "\t%lld\n", exp->ex_stats.start_time); ++ struct percpu_counter *counter = exp->ex_stats->counter; ++ ++ seq_printf(m, "\t%lld\n", exp->ex_stats->start_time); + seq_printf(m, "\tfh_stale: %lld\n", +- percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_FH_STALE])); ++ percpu_counter_sum_positive(&counter[EXP_STATS_FH_STALE])); + seq_printf(m, "\tio_read: %lld\n", +- percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_IO_READ])); ++ percpu_counter_sum_positive(&counter[EXP_STATS_IO_READ])); + seq_printf(m, "\tio_write: %lld\n", +- percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_IO_WRITE])); ++ percpu_counter_sum_positive(&counter[EXP_STATS_IO_WRITE])); + seq_putc(m, '\n'); + return 0; + } +@@ -819,7 +826,7 @@ static void svc_export_init(struct cache_head *cnew, struct cache_head *citem) + new->ex_layout_types = 0; + new->ex_uuid = NULL; + new->cd = item->cd; +- export_stats_reset(&new->ex_stats); ++ export_stats_reset(new->ex_stats); + } + + static void export_update(struct cache_head *cnew, struct cache_head *citem) +@@ -856,7 +863,14 @@ static struct cache_head *svc_export_alloc(void) + if (!i) + return NULL; + +- if (export_stats_init(&i->ex_stats)) { ++ i->ex_stats = kmalloc(sizeof(*(i->ex_stats)), GFP_KERNEL); ++ if (!i->ex_stats) { ++ kfree(i); ++ return NULL; ++ } ++ ++ if (export_stats_init(i->ex_stats)) { ++ kfree(i->ex_stats); + kfree(i); + return NULL; + } +diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h +index 2df8ae25aad302..ca9dc230ae3d0b 100644 +--- a/fs/nfsd/export.h ++++ b/fs/nfsd/export.h +@@ -64,10 +64,10 @@ struct svc_export { + struct cache_head h; + struct auth_domain * ex_client; + int ex_flags; ++ int ex_fsid; + struct path ex_path; + kuid_t ex_anon_uid; + kgid_t ex_anon_gid; +- int ex_fsid; + unsigned char * ex_uuid; /* 16 byte fsid */ + struct nfsd4_fs_locations ex_fslocs; + uint32_t ex_nflavors; +@@ -76,8 +76,8 @@ struct svc_export { + struct nfsd4_deviceid_map *ex_devid_map; + struct cache_detail *cd; + struct rcu_head ex_rcu; +- struct export_stats ex_stats; + unsigned long ex_xprtsec_modes; ++ struct export_stats *ex_stats; + }; + + /* an "export key" (expkey) maps a filehandlefragement to an +diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h +index ec49b200b79762..9bfca3dda63d33 100644 +--- a/fs/nfsd/netns.h ++++ b/fs/nfsd/netns.h +@@ -11,8 +11,10 @@ + #include + #include + #include ++#include + #include + #include ++#include + + /* Hash tables for nfs4_clientid state */ + #define CLIENT_HASH_BITS 4 +@@ -26,10 +28,22 @@ struct 
nfsd4_client_tracking_ops; + + enum { + /* cache misses due only to checksum comparison failures */ +- NFSD_NET_PAYLOAD_MISSES, ++ NFSD_STATS_PAYLOAD_MISSES, + /* amount of memory (in bytes) currently consumed by the DRC */ +- NFSD_NET_DRC_MEM_USAGE, +- NFSD_NET_COUNTERS_NUM ++ NFSD_STATS_DRC_MEM_USAGE, ++ NFSD_STATS_RC_HITS, /* repcache hits */ ++ NFSD_STATS_RC_MISSES, /* repcache misses */ ++ NFSD_STATS_RC_NOCACHE, /* uncached reqs */ ++ NFSD_STATS_FH_STALE, /* FH stale error */ ++ NFSD_STATS_IO_READ, /* bytes returned to read requests */ ++ NFSD_STATS_IO_WRITE, /* bytes passed in write requests */ ++#ifdef CONFIG_NFSD_V4 ++ NFSD_STATS_FIRST_NFS4_OP, /* count of individual nfsv4 operations */ ++ NFSD_STATS_LAST_NFS4_OP = NFSD_STATS_FIRST_NFS4_OP + LAST_NFS4_OP, ++#define NFSD_STATS_NFS4_OP(op) (NFSD_STATS_FIRST_NFS4_OP + (op)) ++ NFSD_STATS_WDELEG_GETATTR, /* count of getattr conflict with wdeleg */ ++#endif ++ NFSD_STATS_COUNTERS_NUM + }; + + /* +@@ -169,7 +183,10 @@ struct nfsd_net { + atomic_t num_drc_entries; + + /* Per-netns stats counters */ +- struct percpu_counter counter[NFSD_NET_COUNTERS_NUM]; ++ struct percpu_counter counter[NFSD_STATS_COUNTERS_NUM]; ++ ++ /* sunrpc svc stats */ ++ struct svc_stat nfsd_svcstats; + + /* longest hash chain seen */ + unsigned int longest_chain; +diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c +index 451026f9986b61..ae0057c54ef4ed 100644 +--- a/fs/nfsd/nfs4proc.c ++++ b/fs/nfsd/nfs4proc.c +@@ -2478,10 +2478,10 @@ nfsd4_proc_null(struct svc_rqst *rqstp) + return rpc_success; + } + +-static inline void nfsd4_increment_op_stats(u32 opnum) ++static inline void nfsd4_increment_op_stats(struct nfsd_net *nn, u32 opnum) + { + if (opnum >= FIRST_NFS4_OP && opnum <= LAST_NFS4_OP) +- percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_NFS4_OP(opnum)]); ++ percpu_counter_inc(&nn->counter[NFSD_STATS_NFS4_OP(opnum)]); + } + + static const struct nfsd4_operation nfsd4_ops[]; +@@ -2756,7 +2756,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp) + status, nfsd4_op_name(op->opnum)); + + nfsd4_cstate_clear_replay(cstate); +- nfsd4_increment_op_stats(op->opnum); ++ nfsd4_increment_op_stats(nn, op->opnum); + } + + fh_put(current_fh); +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c +index c7e52d980cd75f..cdad1eaa4a3180 100644 +--- a/fs/nfsd/nfs4state.c ++++ b/fs/nfsd/nfs4state.c +@@ -8422,6 +8422,7 @@ __be32 + nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode) + { + __be32 status; ++ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); + struct file_lock_context *ctx; + struct file_lock *fl; + struct nfs4_delegation *dp; +@@ -8451,7 +8452,7 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode) + } + break_lease: + spin_unlock(&ctx->flc_lock); +- nfsd_stats_wdeleg_getattr_inc(); ++ nfsd_stats_wdeleg_getattr_inc(nn); + status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ)); + if (status != nfserr_jukebox || + !nfsd_wait_for_delegreturn(rqstp, inode)) +diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c +index 6cd36af2f97e10..c52132ecb339d5 100644 +--- a/fs/nfsd/nfscache.c ++++ b/fs/nfsd/nfscache.c +@@ -176,27 +176,6 @@ void nfsd_drc_slab_free(void) + kmem_cache_destroy(drc_slab); + } + +-/** +- * nfsd_net_reply_cache_init - per net namespace reply cache set-up +- * @nn: nfsd_net being initialized +- * +- * Returns zero on succes; otherwise a negative errno is returned. 
+- */ +-int nfsd_net_reply_cache_init(struct nfsd_net *nn) +-{ +- return nfsd_percpu_counters_init(nn->counter, NFSD_NET_COUNTERS_NUM); +-} +- +-/** +- * nfsd_net_reply_cache_destroy - per net namespace reply cache tear-down +- * @nn: nfsd_net being freed +- * +- */ +-void nfsd_net_reply_cache_destroy(struct nfsd_net *nn) +-{ +- nfsd_percpu_counters_destroy(nn->counter, NFSD_NET_COUNTERS_NUM); +-} +- + int nfsd_reply_cache_init(struct nfsd_net *nn) + { + unsigned int hashsize; +@@ -502,7 +481,7 @@ nfsd_cache_insert(struct nfsd_drc_bucket *b, struct nfsd_cacherep *key, + int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start, + unsigned int len, struct nfsd_cacherep **cacherep) + { +- struct nfsd_net *nn; ++ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); + struct nfsd_cacherep *rp, *found; + __wsum csum; + struct nfsd_drc_bucket *b; +@@ -512,7 +491,7 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start, + int rtn = RC_DOIT; + + if (type == RC_NOCACHE) { +- nfsd_stats_rc_nocache_inc(); ++ nfsd_stats_rc_nocache_inc(nn); + goto out; + } + +@@ -522,7 +501,6 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start, + * Since the common case is a cache miss followed by an insert, + * preallocate an entry. + */ +- nn = net_generic(SVC_NET(rqstp), nfsd_net_id); + rp = nfsd_cacherep_alloc(rqstp, csum, nn); + if (!rp) + goto out; +@@ -540,7 +518,7 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start, + freed = nfsd_cacherep_dispose(&dispose); + trace_nfsd_drc_gc(nn, freed); + +- nfsd_stats_rc_misses_inc(); ++ nfsd_stats_rc_misses_inc(nn); + atomic_inc(&nn->num_drc_entries); + nfsd_stats_drc_mem_usage_add(nn, sizeof(*rp)); + goto out; +@@ -548,7 +526,7 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start, + found_entry: + /* We found a matching entry which is either in progress or done. 
*/ + nfsd_reply_cache_free_locked(NULL, rp, nn); +- nfsd_stats_rc_hits_inc(); ++ nfsd_stats_rc_hits_inc(nn); + rtn = RC_DROPIT; + rp = found; + +@@ -690,15 +668,15 @@ int nfsd_reply_cache_stats_show(struct seq_file *m, void *v) + atomic_read(&nn->num_drc_entries)); + seq_printf(m, "hash buckets: %u\n", 1 << nn->maskbits); + seq_printf(m, "mem usage: %lld\n", +- percpu_counter_sum_positive(&nn->counter[NFSD_NET_DRC_MEM_USAGE])); ++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_DRC_MEM_USAGE])); + seq_printf(m, "cache hits: %lld\n", +- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS])); ++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_HITS])); + seq_printf(m, "cache misses: %lld\n", +- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES])); ++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_MISSES])); + seq_printf(m, "not cached: %lld\n", +- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE])); ++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_NOCACHE])); + seq_printf(m, "payload misses: %lld\n", +- percpu_counter_sum_positive(&nn->counter[NFSD_NET_PAYLOAD_MISSES])); ++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_PAYLOAD_MISSES])); + seq_printf(m, "longest chain len: %u\n", nn->longest_chain); + seq_printf(m, "cachesize at longest: %u\n", nn->longest_chain_cachesize); + return 0; +diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c +index a13e81e450718a..887035b7446763 100644 +--- a/fs/nfsd/nfsctl.c ++++ b/fs/nfsd/nfsctl.c +@@ -1524,14 +1524,17 @@ static __net_init int nfsd_net_init(struct net *net) + retval = nfsd_idmap_init(net); + if (retval) + goto out_idmap_error; +- retval = nfsd_net_reply_cache_init(nn); ++ retval = nfsd_stat_counters_init(nn); + if (retval) + goto out_repcache_error; ++ memset(&nn->nfsd_svcstats, 0, sizeof(nn->nfsd_svcstats)); ++ nn->nfsd_svcstats.program = &nfsd_program; + nn->nfsd_versions = NULL; + nn->nfsd4_minorversions = NULL; + nfsd4_init_leases_net(nn); + get_random_bytes(&nn->siphash_key, sizeof(nn->siphash_key)); + seqlock_init(&nn->writeverf_lock); ++ nfsd_proc_stat_init(net); + + return 0; + +@@ -1552,7 +1555,8 @@ static __net_exit void nfsd_net_exit(struct net *net) + { + struct nfsd_net *nn = net_generic(net, nfsd_net_id); + +- nfsd_net_reply_cache_destroy(nn); ++ nfsd_proc_stat_shutdown(net); ++ nfsd_stat_counters_destroy(nn); + nfsd_idmap_shutdown(net); + nfsd_export_shutdown(net); + nfsd_netns_free_versions(nn); +@@ -1575,12 +1579,9 @@ static int __init init_nfsd(void) + retval = nfsd4_init_pnfs(); + if (retval) + goto out_free_slabs; +- retval = nfsd_stat_init(); /* Statistics */ +- if (retval) +- goto out_free_pnfs; + retval = nfsd_drc_slab_create(); + if (retval) +- goto out_free_stat; ++ goto out_free_pnfs; + nfsd_lockd_init(); /* lockd->nfsd callbacks */ + retval = create_proc_exports_entry(); + if (retval) +@@ -1610,8 +1611,6 @@ static int __init init_nfsd(void) + out_free_lockd: + nfsd_lockd_shutdown(); + nfsd_drc_slab_free(); +-out_free_stat: +- nfsd_stat_shutdown(); + out_free_pnfs: + nfsd4_exit_pnfs(); + out_free_slabs: +@@ -1628,7 +1627,6 @@ static void __exit exit_nfsd(void) + nfsd_drc_slab_free(); + remove_proc_entry("fs/nfs/exports", NULL); + remove_proc_entry("fs/nfs", NULL); +- nfsd_stat_shutdown(); + nfsd_lockd_shutdown(); + nfsd4_free_slabs(); + nfsd4_exit_pnfs(); +diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h +index fe846a360ae18d..d05bd2b811f377 100644 +--- a/fs/nfsd/nfsd.h ++++ b/fs/nfsd/nfsd.h +@@ -69,6 +69,7 @@ extern struct mutex nfsd_mutex; 
+ extern spinlock_t nfsd_drc_lock; + extern unsigned long nfsd_drc_max_mem; + extern unsigned long nfsd_drc_mem_used; ++extern atomic_t nfsd_th_cnt; /* number of available threads */ + + extern const struct seq_operations nfs_exports_op; + +diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c +index 937be276bb6b48..c2495d98c18928 100644 +--- a/fs/nfsd/nfsfh.c ++++ b/fs/nfsd/nfsfh.c +@@ -327,6 +327,7 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp) + __be32 + fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access) + { ++ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); + struct svc_export *exp = NULL; + struct dentry *dentry; + __be32 error; +@@ -395,7 +396,7 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access) + out: + trace_nfsd_fh_verify_err(rqstp, fhp, type, access, error); + if (error == nfserr_stale) +- nfsd_stats_fh_stale_inc(exp); ++ nfsd_stats_fh_stale_inc(nn, exp); + return error; + } + +diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c +index 7ef6af908faacb..7911c4b3b5d355 100644 +--- a/fs/nfsd/nfssvc.c ++++ b/fs/nfsd/nfssvc.c +@@ -34,6 +34,7 @@ + + #define NFSDDBG_FACILITY NFSDDBG_SVC + ++atomic_t nfsd_th_cnt = ATOMIC_INIT(0); + extern struct svc_program nfsd_program; + static int nfsd(void *vrqstp); + #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) +@@ -89,7 +90,6 @@ unsigned long nfsd_drc_max_mem; + unsigned long nfsd_drc_mem_used; + + #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) +-static struct svc_stat nfsd_acl_svcstats; + static const struct svc_version *nfsd_acl_version[] = { + # if defined(CONFIG_NFSD_V2_ACL) + [2] = &nfsd_acl_version2, +@@ -108,15 +108,11 @@ static struct svc_program nfsd_acl_program = { + .pg_vers = nfsd_acl_version, + .pg_name = "nfsacl", + .pg_class = "nfsd", +- .pg_stats = &nfsd_acl_svcstats, + .pg_authenticate = &svc_set_client, + .pg_init_request = nfsd_acl_init_request, + .pg_rpcbind_set = nfsd_acl_rpcbind_set, + }; + +-static struct svc_stat nfsd_acl_svcstats = { +- .program = &nfsd_acl_program, +-}; + #endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */ + + static const struct svc_version *nfsd_version[] = { +@@ -141,7 +137,6 @@ struct svc_program nfsd_program = { + .pg_vers = nfsd_version, /* version table */ + .pg_name = "nfsd", /* program name */ + .pg_class = "nfsd", /* authentication class */ +- .pg_stats = &nfsd_svcstats, /* version table */ + .pg_authenticate = &svc_set_client, /* export authentication */ + .pg_init_request = nfsd_init_request, + .pg_rpcbind_set = nfsd_rpcbind_set, +@@ -675,7 +670,8 @@ int nfsd_create_serv(struct net *net) + if (nfsd_max_blksize == 0) + nfsd_max_blksize = nfsd_get_default_max_blksize(); + nfsd_reset_versions(nn); +- serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize, nfsd); ++ serv = svc_create_pooled(&nfsd_program, &nn->nfsd_svcstats, ++ nfsd_max_blksize, nfsd); + if (serv == NULL) + return -ENOMEM; + +@@ -950,7 +946,7 @@ nfsd(void *vrqstp) + + current->fs->umask = 0; + +- atomic_inc(&nfsdstats.th_cnt); ++ atomic_inc(&nfsd_th_cnt); + + set_freezable(); + +@@ -964,7 +960,7 @@ nfsd(void *vrqstp) + svc_recv(rqstp); + } + +- atomic_dec(&nfsdstats.th_cnt); ++ atomic_dec(&nfsd_th_cnt); + + out: + /* Release the thread */ +diff --git a/fs/nfsd/stats.c b/fs/nfsd/stats.c +index 63797635e1c328..9f606fa08bd4b8 100644 +--- a/fs/nfsd/stats.c ++++ b/fs/nfsd/stats.c +@@ -27,25 +27,22 @@ + + #include "nfsd.h" + +-struct nfsd_stats nfsdstats; +-struct svc_stat 
nfsd_svcstats = { +- .program = &nfsd_program, +-}; +- + static int nfsd_show(struct seq_file *seq, void *v) + { ++ struct net *net = pde_data(file_inode(seq->file)); ++ struct nfsd_net *nn = net_generic(net, nfsd_net_id); + int i; + + seq_printf(seq, "rc %lld %lld %lld\nfh %lld 0 0 0 0\nio %lld %lld\n", +- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]), +- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES]), +- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]), +- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_FH_STALE]), +- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_IO_READ]), +- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_IO_WRITE])); ++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_HITS]), ++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_MISSES]), ++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_NOCACHE]), ++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_FH_STALE]), ++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_IO_READ]), ++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_IO_WRITE])); + + /* thread usage: */ +- seq_printf(seq, "th %u 0", atomic_read(&nfsdstats.th_cnt)); ++ seq_printf(seq, "th %u 0", atomic_read(&nfsd_th_cnt)); + + /* deprecated thread usage histogram stats */ + for (i = 0; i < 10; i++) +@@ -55,7 +52,7 @@ static int nfsd_show(struct seq_file *seq, void *v) + seq_puts(seq, "\nra 0 0 0 0 0 0 0 0 0 0 0 0\n"); + + /* show my rpc info */ +- svc_seq_show(seq, &nfsd_svcstats); ++ svc_seq_show(seq, &nn->nfsd_svcstats); + + #ifdef CONFIG_NFSD_V4 + /* Show count for individual nfsv4 operations */ +@@ -63,10 +60,10 @@ static int nfsd_show(struct seq_file *seq, void *v) + seq_printf(seq,"proc4ops %u", LAST_NFS4_OP + 1); + for (i = 0; i <= LAST_NFS4_OP; i++) { + seq_printf(seq, " %lld", +- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_NFS4_OP(i)])); ++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_NFS4_OP(i)])); + } + seq_printf(seq, "\nwdeleg_getattr %lld", +- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_WDELEG_GETATTR])); ++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_WDELEG_GETATTR])); + + seq_putc(seq, '\n'); + #endif +@@ -76,7 +73,7 @@ static int nfsd_show(struct seq_file *seq, void *v) + + DEFINE_PROC_SHOW_ATTRIBUTE(nfsd); + +-int nfsd_percpu_counters_init(struct percpu_counter counters[], int num) ++int nfsd_percpu_counters_init(struct percpu_counter *counters, int num) + { + int i, err = 0; + +@@ -108,31 +105,24 @@ void nfsd_percpu_counters_destroy(struct percpu_counter counters[], int num) + percpu_counter_destroy(&counters[i]); + } + +-static int nfsd_stat_counters_init(void) ++int nfsd_stat_counters_init(struct nfsd_net *nn) + { +- return nfsd_percpu_counters_init(nfsdstats.counter, NFSD_STATS_COUNTERS_NUM); ++ return nfsd_percpu_counters_init(nn->counter, NFSD_STATS_COUNTERS_NUM); + } + +-static void nfsd_stat_counters_destroy(void) ++void nfsd_stat_counters_destroy(struct nfsd_net *nn) + { +- nfsd_percpu_counters_destroy(nfsdstats.counter, NFSD_STATS_COUNTERS_NUM); ++ nfsd_percpu_counters_destroy(nn->counter, NFSD_STATS_COUNTERS_NUM); + } + +-int nfsd_stat_init(void) ++void nfsd_proc_stat_init(struct net *net) + { +- int err; +- +- err = nfsd_stat_counters_init(); +- if (err) +- return err; ++ struct nfsd_net *nn = net_generic(net, nfsd_net_id); + +- svc_proc_register(&init_net, &nfsd_svcstats, &nfsd_proc_ops); +- +- return 0; ++ svc_proc_register(net, &nn->nfsd_svcstats, &nfsd_proc_ops); 
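All of the nfsd statistics helpers being rewritten here wrap the same percpu_counter lifecycle, now instantiated per network namespace instead of globally. A minimal sketch of that lifecycle:

#include <linux/percpu_counter.h>

struct percpu_counter c;
s64 total;

if (percpu_counter_init(&c, 0, GFP_KERNEL))
	return -ENOMEM;

percpu_counter_inc(&c);			/* cheap, per-cpu update */
percpu_counter_add(&c, 4096);
total = percpu_counter_sum_positive(&c);	/* exact fold over all cpus */

percpu_counter_destroy(&c);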
+ } + +-void nfsd_stat_shutdown(void) ++void nfsd_proc_stat_shutdown(struct net *net) + { +- nfsd_stat_counters_destroy(); +- svc_proc_unregister(&init_net, "nfsd"); ++ svc_proc_unregister(net, "nfsd"); + } +diff --git a/fs/nfsd/stats.h b/fs/nfsd/stats.h +index cf5524e7ca0623..d2753e975dfd34 100644 +--- a/fs/nfsd/stats.h ++++ b/fs/nfsd/stats.h +@@ -10,94 +10,72 @@ + #include + #include + +- +-enum { +- NFSD_STATS_RC_HITS, /* repcache hits */ +- NFSD_STATS_RC_MISSES, /* repcache misses */ +- NFSD_STATS_RC_NOCACHE, /* uncached reqs */ +- NFSD_STATS_FH_STALE, /* FH stale error */ +- NFSD_STATS_IO_READ, /* bytes returned to read requests */ +- NFSD_STATS_IO_WRITE, /* bytes passed in write requests */ +-#ifdef CONFIG_NFSD_V4 +- NFSD_STATS_FIRST_NFS4_OP, /* count of individual nfsv4 operations */ +- NFSD_STATS_LAST_NFS4_OP = NFSD_STATS_FIRST_NFS4_OP + LAST_NFS4_OP, +-#define NFSD_STATS_NFS4_OP(op) (NFSD_STATS_FIRST_NFS4_OP + (op)) +- NFSD_STATS_WDELEG_GETATTR, /* count of getattr conflict with wdeleg */ +-#endif +- NFSD_STATS_COUNTERS_NUM +-}; +- +-struct nfsd_stats { +- struct percpu_counter counter[NFSD_STATS_COUNTERS_NUM]; +- +- atomic_t th_cnt; /* number of available threads */ +-}; +- +-extern struct nfsd_stats nfsdstats; +- +-extern struct svc_stat nfsd_svcstats; +- +-int nfsd_percpu_counters_init(struct percpu_counter counters[], int num); +-void nfsd_percpu_counters_reset(struct percpu_counter counters[], int num); +-void nfsd_percpu_counters_destroy(struct percpu_counter counters[], int num); +-int nfsd_stat_init(void); +-void nfsd_stat_shutdown(void); +- +-static inline void nfsd_stats_rc_hits_inc(void) ++int nfsd_percpu_counters_init(struct percpu_counter *counters, int num); ++void nfsd_percpu_counters_reset(struct percpu_counter *counters, int num); ++void nfsd_percpu_counters_destroy(struct percpu_counter *counters, int num); ++int nfsd_stat_counters_init(struct nfsd_net *nn); ++void nfsd_stat_counters_destroy(struct nfsd_net *nn); ++void nfsd_proc_stat_init(struct net *net); ++void nfsd_proc_stat_shutdown(struct net *net); ++ ++static inline void nfsd_stats_rc_hits_inc(struct nfsd_net *nn) + { +- percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_RC_HITS]); ++ percpu_counter_inc(&nn->counter[NFSD_STATS_RC_HITS]); + } + +-static inline void nfsd_stats_rc_misses_inc(void) ++static inline void nfsd_stats_rc_misses_inc(struct nfsd_net *nn) + { +- percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_RC_MISSES]); ++ percpu_counter_inc(&nn->counter[NFSD_STATS_RC_MISSES]); + } + +-static inline void nfsd_stats_rc_nocache_inc(void) ++static inline void nfsd_stats_rc_nocache_inc(struct nfsd_net *nn) + { +- percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]); ++ percpu_counter_inc(&nn->counter[NFSD_STATS_RC_NOCACHE]); + } + +-static inline void nfsd_stats_fh_stale_inc(struct svc_export *exp) ++static inline void nfsd_stats_fh_stale_inc(struct nfsd_net *nn, ++ struct svc_export *exp) + { +- percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_FH_STALE]); +- if (exp) +- percpu_counter_inc(&exp->ex_stats.counter[EXP_STATS_FH_STALE]); ++ percpu_counter_inc(&nn->counter[NFSD_STATS_FH_STALE]); ++ if (exp && exp->ex_stats) ++ percpu_counter_inc(&exp->ex_stats->counter[EXP_STATS_FH_STALE]); + } + +-static inline void nfsd_stats_io_read_add(struct svc_export *exp, s64 amount) ++static inline void nfsd_stats_io_read_add(struct nfsd_net *nn, ++ struct svc_export *exp, s64 amount) + { +- percpu_counter_add(&nfsdstats.counter[NFSD_STATS_IO_READ], amount); +- if (exp) +- 
percpu_counter_add(&exp->ex_stats.counter[EXP_STATS_IO_READ], amount); ++ percpu_counter_add(&nn->counter[NFSD_STATS_IO_READ], amount); ++ if (exp && exp->ex_stats) ++ percpu_counter_add(&exp->ex_stats->counter[EXP_STATS_IO_READ], amount); + } + +-static inline void nfsd_stats_io_write_add(struct svc_export *exp, s64 amount) ++static inline void nfsd_stats_io_write_add(struct nfsd_net *nn, ++ struct svc_export *exp, s64 amount) + { +- percpu_counter_add(&nfsdstats.counter[NFSD_STATS_IO_WRITE], amount); +- if (exp) +- percpu_counter_add(&exp->ex_stats.counter[EXP_STATS_IO_WRITE], amount); ++ percpu_counter_add(&nn->counter[NFSD_STATS_IO_WRITE], amount); ++ if (exp && exp->ex_stats) ++ percpu_counter_add(&exp->ex_stats->counter[EXP_STATS_IO_WRITE], amount); + } + + static inline void nfsd_stats_payload_misses_inc(struct nfsd_net *nn) + { +- percpu_counter_inc(&nn->counter[NFSD_NET_PAYLOAD_MISSES]); ++ percpu_counter_inc(&nn->counter[NFSD_STATS_PAYLOAD_MISSES]); + } + + static inline void nfsd_stats_drc_mem_usage_add(struct nfsd_net *nn, s64 amount) + { +- percpu_counter_add(&nn->counter[NFSD_NET_DRC_MEM_USAGE], amount); ++ percpu_counter_add(&nn->counter[NFSD_STATS_DRC_MEM_USAGE], amount); + } + + static inline void nfsd_stats_drc_mem_usage_sub(struct nfsd_net *nn, s64 amount) + { +- percpu_counter_sub(&nn->counter[NFSD_NET_DRC_MEM_USAGE], amount); ++ percpu_counter_sub(&nn->counter[NFSD_STATS_DRC_MEM_USAGE], amount); + } + + #ifdef CONFIG_NFSD_V4 +-static inline void nfsd_stats_wdeleg_getattr_inc(void) ++static inline void nfsd_stats_wdeleg_getattr_inc(struct nfsd_net *nn) + { +- percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_WDELEG_GETATTR]); ++ percpu_counter_inc(&nn->counter[NFSD_STATS_WDELEG_GETATTR]); + } + #endif + #endif /* _NFSD_STATS_H */ +diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c +index d0fdf70ab20d36..1f2a5b22b6498e 100644 +--- a/fs/nfsd/vfs.c ++++ b/fs/nfsd/vfs.c +@@ -985,7 +985,9 @@ static __be32 nfsd_finish_read(struct svc_rqst *rqstp, struct svc_fh *fhp, + unsigned long *count, u32 *eof, ssize_t host_err) + { + if (host_err >= 0) { +- nfsd_stats_io_read_add(fhp->fh_export, host_err); ++ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); ++ ++ nfsd_stats_io_read_add(nn, fhp->fh_export, host_err); + *eof = nfsd_eof_on_read(file, offset, host_err, *count); + *count = host_err; + fsnotify_access(file); +@@ -1168,7 +1170,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf, + goto out_nfserr; + } + *cnt = host_err; +- nfsd_stats_io_write_add(exp, *cnt); ++ nfsd_stats_io_write_add(nn, exp, *cnt); + fsnotify_modify(file); + host_err = filemap_check_wb_err(file->f_mapping, since); + if (host_err < 0) +diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c +index 424865dfca74ba..45b687aff700be 100644 +--- a/fs/ntfs3/frecord.c ++++ b/fs/ntfs3/frecord.c +@@ -1896,6 +1896,47 @@ enum REPARSE_SIGN ni_parse_reparse(struct ntfs_inode *ni, struct ATTRIB *attr, + return REPARSE_LINK; + } + ++/* ++ * fiemap_fill_next_extent_k - a copy of fiemap_fill_next_extent ++ * but it accepts kernel address for fi_extents_start ++ */ ++static int fiemap_fill_next_extent_k(struct fiemap_extent_info *fieinfo, ++ u64 logical, u64 phys, u64 len, u32 flags) ++{ ++ struct fiemap_extent extent; ++ struct fiemap_extent __user *dest = fieinfo->fi_extents_start; ++ ++ /* only count the extents */ ++ if (fieinfo->fi_extents_max == 0) { ++ fieinfo->fi_extents_mapped++; ++ return (flags & FIEMAP_EXTENT_LAST) ? 
1 : 0; ++ } ++ ++ if (fieinfo->fi_extents_mapped >= fieinfo->fi_extents_max) ++ return 1; ++ ++ if (flags & FIEMAP_EXTENT_DELALLOC) ++ flags |= FIEMAP_EXTENT_UNKNOWN; ++ if (flags & FIEMAP_EXTENT_DATA_ENCRYPTED) ++ flags |= FIEMAP_EXTENT_ENCODED; ++ if (flags & (FIEMAP_EXTENT_DATA_TAIL | FIEMAP_EXTENT_DATA_INLINE)) ++ flags |= FIEMAP_EXTENT_NOT_ALIGNED; ++ ++ memset(&extent, 0, sizeof(extent)); ++ extent.fe_logical = logical; ++ extent.fe_physical = phys; ++ extent.fe_length = len; ++ extent.fe_flags = flags; ++ ++ dest += fieinfo->fi_extents_mapped; ++ memcpy(dest, &extent, sizeof(extent)); ++ ++ fieinfo->fi_extents_mapped++; ++ if (fieinfo->fi_extents_mapped == fieinfo->fi_extents_max) ++ return 1; ++ return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0; ++} ++ + /* + * ni_fiemap - Helper for file_fiemap(). + * +@@ -1906,6 +1947,8 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo, + __u64 vbo, __u64 len) + { + int err = 0; ++ struct fiemap_extent __user *fe_u = fieinfo->fi_extents_start; ++ struct fiemap_extent *fe_k = NULL; + struct ntfs_sb_info *sbi = ni->mi.sbi; + u8 cluster_bits = sbi->cluster_bits; + struct runs_tree *run; +@@ -1953,6 +1996,18 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo, + goto out; + } + ++ /* ++ * To avoid lock problems replace pointer to user memory by pointer to kernel memory. ++ */ ++ fe_k = kmalloc_array(fieinfo->fi_extents_max, ++ sizeof(struct fiemap_extent), ++ GFP_NOFS | __GFP_ZERO); ++ if (!fe_k) { ++ err = -ENOMEM; ++ goto out; ++ } ++ fieinfo->fi_extents_start = fe_k; ++ + end = vbo + len; + alloc_size = le64_to_cpu(attr->nres.alloc_size); + if (end > alloc_size) +@@ -2041,8 +2096,9 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo, + if (vbo + dlen >= end) + flags |= FIEMAP_EXTENT_LAST; + +- err = fiemap_fill_next_extent(fieinfo, vbo, lbo, dlen, +- flags); ++ err = fiemap_fill_next_extent_k(fieinfo, vbo, lbo, dlen, ++ flags); ++ + if (err < 0) + break; + if (err == 1) { +@@ -2062,7 +2118,8 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo, + if (vbo + bytes >= end) + flags |= FIEMAP_EXTENT_LAST; + +- err = fiemap_fill_next_extent(fieinfo, vbo, lbo, bytes, flags); ++ err = fiemap_fill_next_extent_k(fieinfo, vbo, lbo, bytes, ++ flags); + if (err < 0) + break; + if (err == 1) { +@@ -2075,7 +2132,19 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo, + + up_read(run_lock); + ++ /* ++ * Copy to user memory out of lock ++ */ ++ if (copy_to_user(fe_u, fe_k, ++ fieinfo->fi_extents_max * ++ sizeof(struct fiemap_extent))) { ++ err = -EFAULT; ++ } ++ + out: ++ /* Restore original pointer. */ ++ fieinfo->fi_extents_start = fe_u; ++ kfree(fe_k); + return err; + } + +diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c +index 0f1493e0f6d059..254f6359b287fa 100644 +--- a/fs/quota/quota_tree.c ++++ b/fs/quota/quota_tree.c +@@ -21,6 +21,12 @@ MODULE_AUTHOR("Jan Kara"); + MODULE_DESCRIPTION("Quota trie support"); + MODULE_LICENSE("GPL"); + ++/* ++ * Maximum quota tree depth we support. Only to limit recursion when working ++ * with the tree. 
++ */ ++#define MAX_QTREE_DEPTH 6 ++ + #define __QUOTA_QT_PARANOIA + + static int __get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth) +@@ -327,27 +333,36 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info, + + /* Insert reference to structure into the trie */ + static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, +- uint *treeblk, int depth) ++ uint *blks, int depth) + { + char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS); + int ret = 0, newson = 0, newact = 0; + __le32 *ref; + uint newblk; ++ int i; + + if (!buf) + return -ENOMEM; +- if (!*treeblk) { ++ if (!blks[depth]) { + ret = get_free_dqblk(info); + if (ret < 0) + goto out_buf; +- *treeblk = ret; ++ for (i = 0; i < depth; i++) ++ if (ret == blks[i]) { ++ quota_error(dquot->dq_sb, ++ "Free block already used in tree: block %u", ++ ret); ++ ret = -EIO; ++ goto out_buf; ++ } ++ blks[depth] = ret; + memset(buf, 0, info->dqi_usable_bs); + newact = 1; + } else { +- ret = read_blk(info, *treeblk, buf); ++ ret = read_blk(info, blks[depth], buf); + if (ret < 0) { + quota_error(dquot->dq_sb, "Can't read tree quota " +- "block %u", *treeblk); ++ "block %u", blks[depth]); + goto out_buf; + } + } +@@ -357,8 +372,20 @@ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, + info->dqi_blocks - 1); + if (ret) + goto out_buf; +- if (!newblk) ++ if (!newblk) { + newson = 1; ++ } else { ++ for (i = 0; i <= depth; i++) ++ if (newblk == blks[i]) { ++ quota_error(dquot->dq_sb, ++ "Cycle in quota tree detected: block %u index %u", ++ blks[depth], ++ get_index(info, dquot->dq_id, depth)); ++ ret = -EIO; ++ goto out_buf; ++ } ++ } ++ blks[depth + 1] = newblk; + if (depth == info->dqi_qtree_depth - 1) { + #ifdef __QUOTA_QT_PARANOIA + if (newblk) { +@@ -370,16 +397,16 @@ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, + goto out_buf; + } + #endif +- newblk = find_free_dqentry(info, dquot, &ret); ++ blks[depth + 1] = find_free_dqentry(info, dquot, &ret); + } else { +- ret = do_insert_tree(info, dquot, &newblk, depth+1); ++ ret = do_insert_tree(info, dquot, blks, depth + 1); + } + if (newson && ret >= 0) { + ref[get_index(info, dquot->dq_id, depth)] = +- cpu_to_le32(newblk); +- ret = write_blk(info, *treeblk, buf); ++ cpu_to_le32(blks[depth + 1]); ++ ret = write_blk(info, blks[depth], buf); + } else if (newact && ret < 0) { +- put_free_dqblk(info, buf, *treeblk); ++ put_free_dqblk(info, buf, blks[depth]); + } + out_buf: + kfree(buf); +@@ -390,7 +417,7 @@ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, + static inline int dq_insert_tree(struct qtree_mem_dqinfo *info, + struct dquot *dquot) + { +- int tmp = QT_TREEOFF; ++ uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF }; + + #ifdef __QUOTA_QT_PARANOIA + if (info->dqi_blocks <= QT_TREEOFF) { +@@ -398,7 +425,11 @@ static inline int dq_insert_tree(struct qtree_mem_dqinfo *info, + return -EIO; + } + #endif +- return do_insert_tree(info, dquot, &tmp, 0); ++ if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) { ++ quota_error(dquot->dq_sb, "Quota tree depth too big!"); ++ return -EIO; ++ } ++ return do_insert_tree(info, dquot, blks, 0); + } + + /* +@@ -511,19 +542,20 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot, + + /* Remove reference to dquot from tree */ + static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, +- uint *blk, int depth) ++ uint *blks, int depth) + { + char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS); + int ret = 0; + uint 
newblk; + __le32 *ref = (__le32 *)buf; ++ int i; + + if (!buf) + return -ENOMEM; +- ret = read_blk(info, *blk, buf); ++ ret = read_blk(info, blks[depth], buf); + if (ret < 0) { + quota_error(dquot->dq_sb, "Can't read quota data block %u", +- *blk); ++ blks[depth]); + goto out_buf; + } + newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); +@@ -532,29 +564,38 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, + if (ret) + goto out_buf; + ++ for (i = 0; i <= depth; i++) ++ if (newblk == blks[i]) { ++ quota_error(dquot->dq_sb, ++ "Cycle in quota tree detected: block %u index %u", ++ blks[depth], ++ get_index(info, dquot->dq_id, depth)); ++ ret = -EIO; ++ goto out_buf; ++ } + if (depth == info->dqi_qtree_depth - 1) { + ret = free_dqentry(info, dquot, newblk); +- newblk = 0; ++ blks[depth + 1] = 0; + } else { +- ret = remove_tree(info, dquot, &newblk, depth+1); ++ blks[depth + 1] = newblk; ++ ret = remove_tree(info, dquot, blks, depth + 1); + } +- if (ret >= 0 && !newblk) { +- int i; ++ if (ret >= 0 && !blks[depth + 1]) { + ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0); + /* Block got empty? */ + for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++) + ; + /* Don't put the root block into the free block list */ + if (i == (info->dqi_usable_bs >> 2) +- && *blk != QT_TREEOFF) { +- put_free_dqblk(info, buf, *blk); +- *blk = 0; ++ && blks[depth] != QT_TREEOFF) { ++ put_free_dqblk(info, buf, blks[depth]); ++ blks[depth] = 0; + } else { +- ret = write_blk(info, *blk, buf); ++ ret = write_blk(info, blks[depth], buf); + if (ret < 0) + quota_error(dquot->dq_sb, + "Can't write quota tree block %u", +- *blk); ++ blks[depth]); + } + } + out_buf: +@@ -565,11 +606,15 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, + /* Delete dquot from tree */ + int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) + { +- uint tmp = QT_TREEOFF; ++ uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF }; + + if (!dquot->dq_off) /* Even not allocated? 
*/ + return 0; +- return remove_tree(info, dquot, &tmp, 0); ++ if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) { ++ quota_error(dquot->dq_sb, "Quota tree depth too big!"); ++ return -EIO; ++ } ++ return remove_tree(info, dquot, blks, 0); + } + EXPORT_SYMBOL(qtree_delete_dquot); + +@@ -613,18 +658,20 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info, + + /* Find entry for given id in the tree */ + static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info, +- struct dquot *dquot, uint blk, int depth) ++ struct dquot *dquot, uint *blks, int depth) + { + char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS); + loff_t ret = 0; + __le32 *ref = (__le32 *)buf; ++ uint blk; ++ int i; + + if (!buf) + return -ENOMEM; +- ret = read_blk(info, blk, buf); ++ ret = read_blk(info, blks[depth], buf); + if (ret < 0) { + quota_error(dquot->dq_sb, "Can't read quota tree block %u", +- blk); ++ blks[depth]); + goto out_buf; + } + ret = 0; +@@ -636,8 +683,19 @@ static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info, + if (ret) + goto out_buf; + ++ /* Check for cycles in the tree */ ++ for (i = 0; i <= depth; i++) ++ if (blk == blks[i]) { ++ quota_error(dquot->dq_sb, ++ "Cycle in quota tree detected: block %u index %u", ++ blks[depth], ++ get_index(info, dquot->dq_id, depth)); ++ ret = -EIO; ++ goto out_buf; ++ } ++ blks[depth + 1] = blk; + if (depth < info->dqi_qtree_depth - 1) +- ret = find_tree_dqentry(info, dquot, blk, depth+1); ++ ret = find_tree_dqentry(info, dquot, blks, depth + 1); + else + ret = find_block_dqentry(info, dquot, blk); + out_buf: +@@ -649,7 +707,13 @@ static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info, + static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info, + struct dquot *dquot) + { +- return find_tree_dqentry(info, dquot, QT_TREEOFF, 0); ++ uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF }; ++ ++ if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) { ++ quota_error(dquot->dq_sb, "Quota tree depth too big!"); ++ return -EIO; ++ } ++ return find_tree_dqentry(info, dquot, blks, 0); + } + + int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) +diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c +index ae99e7b88205b2..7978ab671e0c6a 100644 +--- a/fs/quota/quota_v2.c ++++ b/fs/quota/quota_v2.c +@@ -166,14 +166,17 @@ static int v2_read_file_info(struct super_block *sb, int type) + i_size_read(sb_dqopt(sb)->files[type])); + goto out_free; + } +- if (qinfo->dqi_free_blk >= qinfo->dqi_blocks) { +- quota_error(sb, "Free block number too big (%u >= %u).", +- qinfo->dqi_free_blk, qinfo->dqi_blocks); ++ if (qinfo->dqi_free_blk && (qinfo->dqi_free_blk <= QT_TREEOFF || ++ qinfo->dqi_free_blk >= qinfo->dqi_blocks)) { ++ quota_error(sb, "Free block number %u out of range (%u, %u).", ++ qinfo->dqi_free_blk, QT_TREEOFF, qinfo->dqi_blocks); + goto out_free; + } +- if (qinfo->dqi_free_entry >= qinfo->dqi_blocks) { +- quota_error(sb, "Block with free entry too big (%u >= %u).", +- qinfo->dqi_free_entry, qinfo->dqi_blocks); ++ if (qinfo->dqi_free_entry && (qinfo->dqi_free_entry <= QT_TREEOFF || ++ qinfo->dqi_free_entry >= qinfo->dqi_blocks)) { ++ quota_error(sb, "Block with free entry %u out of range (%u, %u).", ++ qinfo->dqi_free_entry, QT_TREEOFF, ++ qinfo->dqi_blocks); + goto out_free; + } + ret = 0; +diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c +index 3676e02a0232a4..4ab8cab6ea6147 100644 +--- a/fs/reiserfs/stree.c ++++ b/fs/reiserfs/stree.c +@@ -1407,7 +1407,7 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th, + 
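Stepping back to the quota_tree.c hunks above: the common thread is that the recursion no longer passes a single current block but the whole chain of block numbers from the root, so a corrupted on-disk tree whose pointers loop back onto an ancestor now fails with EIO instead of recursing without bound, and MAX_QTREE_DEPTH caps the chain. Below is a minimal sketch of that walk; every demo_* name is hypothetical and the child lookup is abstracted behind a callback.

#include <linux/errno.h>

#define DEMO_MAX_DEPTH 6	/* upper bound on the path we track */

/*
 * Walk one level at a time. blks[0..depth] already holds the path
 * from the root; a child block that reappears on that path proves
 * the tree contains a cycle.
 */
static int demo_walk(unsigned int (*child)(unsigned int blk, int depth),
		     unsigned int *blks, int depth)
{
	unsigned int next;
	int i;

	if (depth + 1 >= DEMO_MAX_DEPTH)
		return -EIO;		/* deeper than any valid tree */

	next = child(blks[depth], depth);
	if (!next)
		return 0;		/* reached a leaf, path is clean */

	for (i = 0; i <= depth; i++)
		if (next == blks[i])
			return -EIO;	/* cycle: block already visited */

	blks[depth + 1] = next;
	return demo_walk(child, blks, depth + 1);
}

The real code checks the depth bound once against dqi_qtree_depth before starting and reports the offending block and index through quota_error(); the per-level structure above is only the shape of the detection.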
INITIALIZE_PATH(path); + int item_len = 0; + int tb_init = 0; +- struct cpu_key cpu_key; ++ struct cpu_key cpu_key = {}; + int retval; + int quota_cut_bytes = 0; + +diff --git a/fs/romfs/super.c b/fs/romfs/super.c +index 5c35f6c760377e..b1bdfbc211c3c0 100644 +--- a/fs/romfs/super.c ++++ b/fs/romfs/super.c +@@ -593,7 +593,7 @@ static void romfs_kill_sb(struct super_block *sb) + #ifdef CONFIG_ROMFS_ON_BLOCK + if (sb->s_bdev) { + sync_blockdev(sb->s_bdev); +- blkdev_put(sb->s_bdev, sb); ++ bdev_release(sb->s_bdev_handle); + } + #endif + } +diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c +index 581ce951933901..2dc730800f448d 100644 +--- a/fs/squashfs/block.c ++++ b/fs/squashfs/block.c +@@ -321,7 +321,7 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length, + TRACE("Block @ 0x%llx, %scompressed size %d\n", index - 2, + compressed ? "" : "un", length); + } +- if (length < 0 || length > output->length || ++ if (length <= 0 || length > output->length || + (index + length) > msblk->bytes_used) { + res = -EIO; + goto out; +diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c +index 8ba8c4c5077078..e8df6430444b01 100644 +--- a/fs/squashfs/file.c ++++ b/fs/squashfs/file.c +@@ -544,7 +544,8 @@ static void squashfs_readahead(struct readahead_control *ractl) + struct squashfs_page_actor *actor; + unsigned int nr_pages = 0; + struct page **pages; +- int i, file_end = i_size_read(inode) >> msblk->block_log; ++ int i; ++ loff_t file_end = i_size_read(inode) >> msblk->block_log; + unsigned int max_pages = 1UL << shift; + + readahead_expand(ractl, start, (len | mask) + 1); +diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c +index f1ccad519e28cc..763a3f7a75f6dd 100644 +--- a/fs/squashfs/file_direct.c ++++ b/fs/squashfs/file_direct.c +@@ -26,10 +26,10 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize, + struct inode *inode = target_page->mapping->host; + struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; + +- int file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT; ++ loff_t file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT; + int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1; +- int start_index = target_page->index & ~mask; +- int end_index = start_index | mask; ++ loff_t start_index = target_page->index & ~mask; ++ loff_t end_index = start_index | mask; + int i, n, pages, bytes, res = -ENOMEM; + struct page **page; + struct squashfs_page_actor *actor; +diff --git a/fs/super.c b/fs/super.c +index 576abb1ff0403d..b142e71eb8dfdd 100644 +--- a/fs/super.c ++++ b/fs/super.c +@@ -1490,14 +1490,16 @@ int setup_bdev_super(struct super_block *sb, int sb_flags, + struct fs_context *fc) + { + blk_mode_t mode = sb_open_mode(sb_flags); ++ struct bdev_handle *bdev_handle; + struct block_device *bdev; + +- bdev = blkdev_get_by_dev(sb->s_dev, mode, sb, &fs_holder_ops); +- if (IS_ERR(bdev)) { ++ bdev_handle = bdev_open_by_dev(sb->s_dev, mode, sb, &fs_holder_ops); ++ if (IS_ERR(bdev_handle)) { + if (fc) + errorf(fc, "%s: Can't open blockdev", fc->source); +- return PTR_ERR(bdev); ++ return PTR_ERR(bdev_handle); + } ++ bdev = bdev_handle->bdev; + + /* + * This really should be in blkdev_get_by_dev, but right now can't due +@@ -1505,7 +1507,7 @@ int setup_bdev_super(struct super_block *sb, int sb_flags, + * writable from userspace even for a read-only block device. 
+ */ + if ((mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) { +- blkdev_put(bdev, sb); ++ bdev_release(bdev_handle); + return -EACCES; + } + +@@ -1521,10 +1523,11 @@ int setup_bdev_super(struct super_block *sb, int sb_flags, + mutex_unlock(&bdev->bd_fsfreeze_mutex); + if (fc) + warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev); +- blkdev_put(bdev, sb); ++ bdev_release(bdev_handle); + return -EBUSY; + } + spin_lock(&sb_lock); ++ sb->s_bdev_handle = bdev_handle; + sb->s_bdev = bdev; + sb->s_bdi = bdi_get(bdev->bd_disk->bdi); + if (bdev_stable_writes(bdev)) +@@ -1657,7 +1660,7 @@ void kill_block_super(struct super_block *sb) + generic_shutdown_super(sb); + if (bdev) { + sync_blockdev(bdev); +- blkdev_put(bdev, sb); ++ bdev_release(sb->s_bdev_handle); + } + } + +diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h +index 265da00a1a8b1b..6eefe5153a6ff7 100644 +--- a/include/linux/cgroup-defs.h ++++ b/include/linux/cgroup-defs.h +@@ -543,6 +543,10 @@ struct cgroup_root { + /* Unique id for this hierarchy. */ + int hierarchy_id; + ++ /* A list running through the active hierarchies */ ++ struct list_head root_list; ++ struct rcu_head rcu; /* Must be near the top */ ++ + /* + * The root cgroup. The containing cgroup_root will be destroyed on its + * release. cgrp->ancestors[0] will be used overflowing into the +@@ -556,9 +560,6 @@ struct cgroup_root { + /* Number of cgroups in the hierarchy, used only for /proc/cgroups */ + atomic_t nr_cgrps; + +- /* A list running through the active hierarchies */ +- struct list_head root_list; +- + /* Hierarchy-specific flags */ + unsigned int flags; + +diff --git a/include/linux/fs.h b/include/linux/fs.h +index 56dce38c478627..43e640fb4a7f77 100644 +--- a/include/linux/fs.h ++++ b/include/linux/fs.h +@@ -1036,7 +1036,7 @@ struct file_handle { + __u32 handle_bytes; + int handle_type; + /* file identifier */ +- unsigned char f_handle[]; ++ unsigned char f_handle[] __counted_by(handle_bytes); + }; + + static inline struct file *get_file(struct file *f) +@@ -1223,6 +1223,7 @@ struct super_block { + struct hlist_bl_head s_roots; /* alternate root dentries for NFS */ + struct list_head s_mounts; /* list of mounts; _not_ for fs use */ + struct block_device *s_bdev; ++ struct bdev_handle *s_bdev_handle; + struct backing_dev_info *s_bdi; + struct mtd_info *s_mtd; + struct hlist_node s_instances; +diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h +index bae5e2369b4f7a..1c1a5d926b1713 100644 +--- a/include/linux/sockptr.h ++++ b/include/linux/sockptr.h +@@ -50,11 +50,36 @@ static inline int copy_from_sockptr_offset(void *dst, sockptr_t src, + return 0; + } + ++/* Deprecated. ++ * This is unsafe, unless caller checked user provided optlen. ++ * Prefer copy_safe_from_sockptr() instead. ++ */ + static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size) + { + return copy_from_sockptr_offset(dst, src, 0, size); + } + ++/** ++ * copy_safe_from_sockptr: copy a struct from sockptr ++ * @dst: Destination address, in kernel space. This buffer must be @ksize ++ * bytes long. ++ * @ksize: Size of @dst struct. ++ * @optval: Source address. (in user or kernel space) ++ * @optlen: Size of @optval data. ++ * ++ * Returns: ++ * * -EINVAL: @optlen < @ksize ++ * * -EFAULT: access to userspace failed. 
++ * * 0 : @ksize bytes were copied ++ */ ++static inline int copy_safe_from_sockptr(void *dst, size_t ksize, ++ sockptr_t optval, unsigned int optlen) ++{ ++ if (optlen < ksize) ++ return -EINVAL; ++ return copy_from_sockptr(dst, optval, ksize); ++} ++ + static inline int copy_to_sockptr_offset(sockptr_t dst, size_t offset, + const void *src, size_t size) + { +diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h +index dbf5b21feafe48..3d8b215f32d5b0 100644 +--- a/include/linux/sunrpc/svc.h ++++ b/include/linux/sunrpc/svc.h +@@ -336,7 +336,6 @@ struct svc_program { + const struct svc_version **pg_vers; /* version array */ + char * pg_name; /* service name */ + char * pg_class; /* class name: services sharing authentication */ +- struct svc_stat * pg_stats; /* rpc statistics */ + enum svc_auth_status (*pg_authenticate)(struct svc_rqst *rqstp); + __be32 (*pg_init_request)(struct svc_rqst *, + const struct svc_program *, +@@ -408,7 +407,9 @@ bool svc_rqst_replace_page(struct svc_rqst *rqstp, + void svc_rqst_release_pages(struct svc_rqst *rqstp); + void svc_rqst_free(struct svc_rqst *); + void svc_exit_thread(struct svc_rqst *); +-struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int, ++struct svc_serv * svc_create_pooled(struct svc_program *prog, ++ struct svc_stat *stats, ++ unsigned int bufsize, + int (*threadfn)(void *data)); + int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int); + int svc_pool_stats_open(struct svc_serv *serv, struct file *file); +diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h +index fb09fd1767f289..ba6e346c8d669a 100644 +--- a/include/uapi/linux/bpf.h ++++ b/include/uapi/linux/bpf.h +@@ -77,12 +77,29 @@ struct bpf_insn { + __s32 imm; /* signed immediate constant */ + }; + +-/* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */ ++/* Deprecated: use struct bpf_lpm_trie_key_u8 (when the "data" member is needed for ++ * byte access) or struct bpf_lpm_trie_key_hdr (when using an alternative type for ++ * the trailing flexible array member) instead. ++ */ + struct bpf_lpm_trie_key { + __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */ + __u8 data[0]; /* Arbitrary size */ + }; + ++/* Header for bpf_lpm_trie_key structs */ ++struct bpf_lpm_trie_key_hdr { ++ __u32 prefixlen; ++}; ++ ++/* Key of an a BPF_MAP_TYPE_LPM_TRIE entry, with trailing byte array. 
*/ ++struct bpf_lpm_trie_key_u8 { ++ union { ++ struct bpf_lpm_trie_key_hdr hdr; ++ __u32 prefixlen; ++ }; ++ __u8 data[]; /* Arbitrary size */ ++}; ++ + struct bpf_cgroup_storage_key { + __u64 cgroup_inode_id; /* cgroup inode id */ + __u32 attach_type; /* program attach type (enum bpf_attach_type) */ +diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c +index b32be680da6cdc..d0febf07051edf 100644 +--- a/kernel/bpf/lpm_trie.c ++++ b/kernel/bpf/lpm_trie.c +@@ -164,13 +164,13 @@ static inline int extract_bit(const u8 *data, size_t index) + */ + static size_t longest_prefix_match(const struct lpm_trie *trie, + const struct lpm_trie_node *node, +- const struct bpf_lpm_trie_key *key) ++ const struct bpf_lpm_trie_key_u8 *key) + { + u32 limit = min(node->prefixlen, key->prefixlen); + u32 prefixlen = 0, i = 0; + + BUILD_BUG_ON(offsetof(struct lpm_trie_node, data) % sizeof(u32)); +- BUILD_BUG_ON(offsetof(struct bpf_lpm_trie_key, data) % sizeof(u32)); ++ BUILD_BUG_ON(offsetof(struct bpf_lpm_trie_key_u8, data) % sizeof(u32)); + + #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(CONFIG_64BIT) + +@@ -229,7 +229,7 @@ static void *trie_lookup_elem(struct bpf_map *map, void *_key) + { + struct lpm_trie *trie = container_of(map, struct lpm_trie, map); + struct lpm_trie_node *node, *found = NULL; +- struct bpf_lpm_trie_key *key = _key; ++ struct bpf_lpm_trie_key_u8 *key = _key; + + if (key->prefixlen > trie->max_prefixlen) + return NULL; +@@ -308,8 +308,9 @@ static long trie_update_elem(struct bpf_map *map, + { + struct lpm_trie *trie = container_of(map, struct lpm_trie, map); + struct lpm_trie_node *node, *im_node = NULL, *new_node = NULL; ++ struct lpm_trie_node *free_node = NULL; + struct lpm_trie_node __rcu **slot; +- struct bpf_lpm_trie_key *key = _key; ++ struct bpf_lpm_trie_key_u8 *key = _key; + unsigned long irq_flags; + unsigned int next_bit; + size_t matchlen = 0; +@@ -382,7 +383,7 @@ static long trie_update_elem(struct bpf_map *map, + trie->n_entries--; + + rcu_assign_pointer(*slot, new_node); +- kfree_rcu(node, rcu); ++ free_node = node; + + goto out; + } +@@ -429,6 +430,7 @@ static long trie_update_elem(struct bpf_map *map, + } + + spin_unlock_irqrestore(&trie->lock, irq_flags); ++ kfree_rcu(free_node, rcu); + + return ret; + } +@@ -437,7 +439,8 @@ static long trie_update_elem(struct bpf_map *map, + static long trie_delete_elem(struct bpf_map *map, void *_key) + { + struct lpm_trie *trie = container_of(map, struct lpm_trie, map); +- struct bpf_lpm_trie_key *key = _key; ++ struct lpm_trie_node *free_node = NULL, *free_parent = NULL; ++ struct bpf_lpm_trie_key_u8 *key = _key; + struct lpm_trie_node __rcu **trim, **trim2; + struct lpm_trie_node *node, *parent; + unsigned long irq_flags; +@@ -506,8 +509,8 @@ static long trie_delete_elem(struct bpf_map *map, void *_key) + else + rcu_assign_pointer( + *trim2, rcu_access_pointer(parent->child[0])); +- kfree_rcu(parent, rcu); +- kfree_rcu(node, rcu); ++ free_parent = parent; ++ free_node = node; + goto out; + } + +@@ -521,10 +524,12 @@ static long trie_delete_elem(struct bpf_map *map, void *_key) + rcu_assign_pointer(*trim, rcu_access_pointer(node->child[1])); + else + RCU_INIT_POINTER(*trim, NULL); +- kfree_rcu(node, rcu); ++ free_node = node; + + out: + spin_unlock_irqrestore(&trie->lock, irq_flags); ++ kfree_rcu(free_parent, rcu); ++ kfree_rcu(free_node, rcu); + + return ret; + } +@@ -536,7 +541,7 @@ static long trie_delete_elem(struct bpf_map *map, void *_key) + sizeof(struct lpm_trie_node)) + #define LPM_VAL_SIZE_MIN 1 + 
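On the uapi side of the LPM trie change above: the old struct ended in a zero-length data[0] array, which modern flexible-array checking cannot reason about. The replacement keeps the byte layout identical: prefixlen still sits at offset 0 through the union, while data[] becomes a true flexible array behind a separate header type. The sketch below shows how a map user might declare a fixed-size IPv4 key under the new layout, assuming headers that already carry this patch; the demo_* names are hypothetical.

#include <string.h>
#include <linux/bpf.h>
#include <linux/types.h>

struct demo_lpm_key_v4 {
	union {
		struct bpf_lpm_trie_key_hdr hdr;	/* new header type */
		__u32 prefixlen;			/* legacy spelling */
	};
	__u8 data[4];	/* one byte per IPv4 address octet */
};

static void demo_fill_key(struct demo_lpm_key_v4 *key,
			  const __u8 addr[4], __u32 prefixlen)
{
	key->hdr.prefixlen = prefixlen;	/* up to 32 for AF_INET */
	memcpy(key->data, addr, sizeof(key->data));
}

Because the union keeps prefixlen at the same offset, key->prefixlen and key->hdr.prefixlen are interchangeable, which is what lets the kernel-side conversion in lpm_trie.c above swap the key type without changing any offsets.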
+-#define LPM_KEY_SIZE(X) (sizeof(struct bpf_lpm_trie_key) + (X)) ++#define LPM_KEY_SIZE(X) (sizeof(struct bpf_lpm_trie_key_u8) + (X)) + #define LPM_KEY_SIZE_MAX LPM_KEY_SIZE(LPM_DATA_SIZE_MAX) + #define LPM_KEY_SIZE_MIN LPM_KEY_SIZE(LPM_DATA_SIZE_MIN) + +@@ -565,7 +570,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr) + /* copy mandatory map attributes */ + bpf_map_init_from_attr(&trie->map, attr); + trie->data_size = attr->key_size - +- offsetof(struct bpf_lpm_trie_key, data); ++ offsetof(struct bpf_lpm_trie_key_u8, data); + trie->max_prefixlen = trie->data_size * 8; + + spin_lock_init(&trie->lock); +@@ -616,7 +621,7 @@ static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key) + { + struct lpm_trie_node *node, *next_node = NULL, *parent, *search_root; + struct lpm_trie *trie = container_of(map, struct lpm_trie, map); +- struct bpf_lpm_trie_key *key = _key, *next_key = _next_key; ++ struct bpf_lpm_trie_key_u8 *key = _key, *next_key = _next_key; + struct lpm_trie_node **node_stack = NULL; + int err = 0, stack_ptr = -1; + unsigned int next_bit; +@@ -703,7 +708,7 @@ static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key) + } + do_copy: + next_key->prefixlen = next_node->prefixlen; +- memcpy((void *)next_key + offsetof(struct bpf_lpm_trie_key, data), ++ memcpy((void *)next_key + offsetof(struct bpf_lpm_trie_key_u8, data), + next_node->data, trie->data_size); + free_stack: + kfree(node_stack); +@@ -715,7 +720,7 @@ static int trie_check_btf(const struct bpf_map *map, + const struct btf_type *key_type, + const struct btf_type *value_type) + { +- /* Keys must have struct bpf_lpm_trie_key embedded. */ ++ /* Keys must have struct bpf_lpm_trie_key_u8 embedded. */ + return BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT ? + -EINVAL : 0; + } +diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h +index c56071f150f2ae..5e17f01ced9fd2 100644 +--- a/kernel/cgroup/cgroup-internal.h ++++ b/kernel/cgroup/cgroup-internal.h +@@ -170,7 +170,8 @@ extern struct list_head cgroup_roots; + + /* iterate across the hierarchies */ + #define for_each_root(root) \ +- list_for_each_entry((root), &cgroup_roots, root_list) ++ list_for_each_entry_rcu((root), &cgroup_roots, root_list, \ ++ lockdep_is_held(&cgroup_mutex)) + + /** + * for_each_subsys - iterate all enabled cgroup subsystems +diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c +index 094f513319259d..d872fff901073f 100644 +--- a/kernel/cgroup/cgroup.c ++++ b/kernel/cgroup/cgroup.c +@@ -1313,7 +1313,7 @@ static void cgroup_exit_root_id(struct cgroup_root *root) + + void cgroup_free_root(struct cgroup_root *root) + { +- kfree(root); ++ kfree_rcu(root, rcu); + } + + static void cgroup_destroy_root(struct cgroup_root *root) +@@ -1346,7 +1346,7 @@ static void cgroup_destroy_root(struct cgroup_root *root) + spin_unlock_irq(&css_set_lock); + + if (!list_empty(&root->root_list)) { +- list_del(&root->root_list); ++ list_del_rcu(&root->root_list); + cgroup_root_count--; + } + +@@ -1386,7 +1386,15 @@ static inline struct cgroup *__cset_cgroup_from_root(struct css_set *cset, + } + } + +- BUG_ON(!res_cgroup); ++ /* ++ * If cgroup_mutex is not held, the cgrp_cset_link will be freed ++ * before we remove the cgroup root from the root_list. Consequently, ++ * when accessing a cgroup root, the cset_link may have already been ++ * freed, resulting in a NULL res_cgroup. However, by holding the ++ * cgroup_mutex, we ensure that res_cgroup can't be NULL. 
++ * If we don't hold cgroup_mutex in the caller, we must do the NULL ++ * check. ++ */ + return res_cgroup; + } + +@@ -1445,7 +1453,6 @@ static struct cgroup *current_cgns_cgroup_dfl(void) + static struct cgroup *cset_cgroup_from_root(struct css_set *cset, + struct cgroup_root *root) + { +- lockdep_assert_held(&cgroup_mutex); + lockdep_assert_held(&css_set_lock); + + return __cset_cgroup_from_root(cset, root); +@@ -1453,7 +1460,9 @@ static struct cgroup *cset_cgroup_from_root(struct css_set *cset, + + /* + * Return the cgroup for "task" from the given hierarchy. Must be +- * called with cgroup_mutex and css_set_lock held. ++ * called with css_set_lock held to prevent task's groups from being modified. ++ * Must be called with either cgroup_mutex or rcu read lock to prevent the ++ * cgroup root from being destroyed. + */ + struct cgroup *task_cgroup_from_root(struct task_struct *task, + struct cgroup_root *root) +@@ -2014,7 +2023,7 @@ void init_cgroup_root(struct cgroup_fs_context *ctx) + struct cgroup_root *root = ctx->root; + struct cgroup *cgrp = &root->cgrp; + +- INIT_LIST_HEAD(&root->root_list); ++ INIT_LIST_HEAD_RCU(&root->root_list); + atomic_set(&root->nr_cgrps, 1); + cgrp->root = root; + init_cgroup_housekeeping(cgrp); +@@ -2097,7 +2106,7 @@ int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask) + * care of subsystems' refcounts, which are explicitly dropped in + * the failure exit path. + */ +- list_add(&root->root_list, &cgroup_roots); ++ list_add_rcu(&root->root_list, &cgroup_roots); + cgroup_root_count++; + + /* +diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c +index 5ecd072a34fe72..eb86283901565b 100644 +--- a/kernel/irq/cpuhotplug.c ++++ b/kernel/irq/cpuhotplug.c +@@ -130,6 +130,22 @@ static bool migrate_one_irq(struct irq_desc *desc) + * CPU. + */ + err = irq_do_set_affinity(d, affinity, false); ++ ++ /* ++ * If there are online CPUs in the affinity mask, but they have no ++ * vectors left to make the migration work, try to break the ++ * affinity by migrating to any online CPU. ++ */ ++ if (err == -ENOSPC && !irqd_affinity_is_managed(d) && affinity != cpu_online_mask) { ++ pr_debug("IRQ%u: set affinity failed for %*pbl, re-try with online CPUs\n", ++ d->irq, cpumask_pr_args(affinity)); ++ ++ affinity = cpu_online_mask; ++ brokeaff = true; ++ ++ err = irq_do_set_affinity(d, affinity, false); ++ } ++ + if (err) { + pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n", + d->irq, err); +@@ -195,10 +211,15 @@ static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu) + !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity)) + return; + +- if (irqd_is_managed_and_shutdown(data)) { +- irq_startup(desc, IRQ_RESEND, IRQ_START_COND); ++ /* ++ * Don't restore suspended interrupts here when a system comes back ++ * from S3. They are reenabled via resume_device_irqs(). ++ */ ++ if (desc->istate & IRQS_SUSPENDED) + return; +- } ++ ++ if (irqd_is_managed_and_shutdown(data)) ++ irq_startup(desc, IRQ_RESEND, IRQ_START_COND); + + /* + * If the interrupt can only be directed to a single target +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c +index a054cd5ec08bce..8a936c1ffad390 100644 +--- a/kernel/irq/manage.c ++++ b/kernel/irq/manage.c +@@ -796,10 +796,14 @@ void __enable_irq(struct irq_desc *desc) + irq_settings_set_noprobe(desc); + /* + * Call irq_startup() not irq_enable() here because the +- * interrupt might be marked NOAUTOEN. So irq_startup() +- * needs to be invoked when it gets enabled the first +- * time. 
If it was already started up, then irq_startup() +- * will invoke irq_enable() under the hood. ++ * interrupt might be marked NOAUTOEN so irq_startup() ++ * needs to be invoked when it gets enabled the first time. ++ * This is also required when __enable_irq() is invoked for ++ * a managed and shutdown interrupt from the S3 resume ++ * path. ++ * ++ * If it was already started up, then irq_startup() will ++ * invoke irq_enable() under the hood. + */ + irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE); + break; +diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c +index 13f0d11927074a..68af76ca8bc992 100644 +--- a/mm/debug_vm_pgtable.c ++++ b/mm/debug_vm_pgtable.c +@@ -39,22 +39,7 @@ + * Please refer Documentation/mm/arch_pgtable_helpers.rst for the semantics + * expectations that are being validated here. All future changes in here + * or the documentation need to be in sync. +- * +- * On s390 platform, the lower 4 bits are used to identify given page table +- * entry type. But these bits might affect the ability to clear entries with +- * pxx_clear() because of how dynamic page table folding works on s390. So +- * while loading up the entries do not change the lower 4 bits. It does not +- * have affect any other platform. Also avoid the 62nd bit on ppc64 that is +- * used to mark a pte entry. + */ +-#define S390_SKIP_MASK GENMASK(3, 0) +-#if __BITS_PER_LONG == 64 +-#define PPC64_SKIP_MASK GENMASK(62, 62) +-#else +-#define PPC64_SKIP_MASK 0x0 +-#endif +-#define ARCH_SKIP_MASK (S390_SKIP_MASK | PPC64_SKIP_MASK) +-#define RANDOM_ORVALUE (GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK) + #define RANDOM_NZVALUE GENMASK(7, 0) + + struct pgtable_debug_args { +@@ -510,8 +495,7 @@ static void __init pud_clear_tests(struct pgtable_debug_args *args) + return; + + pr_debug("Validating PUD clear\n"); +- pud = __pud(pud_val(pud) | RANDOM_ORVALUE); +- WRITE_ONCE(*args->pudp, pud); ++ WARN_ON(pud_none(pud)); + pud_clear(args->pudp); + pud = READ_ONCE(*args->pudp); + WARN_ON(!pud_none(pud)); +@@ -547,8 +531,7 @@ static void __init p4d_clear_tests(struct pgtable_debug_args *args) + return; + + pr_debug("Validating P4D clear\n"); +- p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE); +- WRITE_ONCE(*args->p4dp, p4d); ++ WARN_ON(p4d_none(p4d)); + p4d_clear(args->p4dp); + p4d = READ_ONCE(*args->p4dp); + WARN_ON(!p4d_none(p4d)); +@@ -581,8 +564,7 @@ static void __init pgd_clear_tests(struct pgtable_debug_args *args) + return; + + pr_debug("Validating PGD clear\n"); +- pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE); +- WRITE_ONCE(*args->pgdp, pgd); ++ WARN_ON(pgd_none(pgd)); + pgd_clear(args->pgdp); + pgd = READ_ONCE(*args->pgdp); + WARN_ON(!pgd_none(pgd)); +@@ -633,10 +615,8 @@ static void __init pte_clear_tests(struct pgtable_debug_args *args) + if (WARN_ON(!args->ptep)) + return; + +-#ifndef CONFIG_RISCV +- pte = __pte(pte_val(pte) | RANDOM_ORVALUE); +-#endif + set_pte_at(args->mm, args->vaddr, args->ptep, pte); ++ WARN_ON(pte_none(pte)); + flush_dcache_page(page); + barrier(); + ptep_clear(args->mm, args->vaddr, args->ptep); +@@ -649,8 +629,7 @@ static void __init pmd_clear_tests(struct pgtable_debug_args *args) + pmd_t pmd = READ_ONCE(*args->pmdp); + + pr_debug("Validating PMD clear\n"); +- pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE); +- WRITE_ONCE(*args->pmdp, pmd); ++ WARN_ON(pmd_none(pmd)); + pmd_clear(args->pmdp); + pmd = READ_ONCE(*args->pmdp); + WARN_ON(!pmd_none(pmd)); +diff --git a/mm/gup.c b/mm/gup.c +index f50fe2219a13b6..fdd75384160d8d 100644 +--- a/mm/gup.c ++++ b/mm/gup.c +@@ -97,95 +97,6 @@ static 
inline struct folio *try_get_folio(struct page *page, int refs) + return folio; + } + +-/** +- * try_grab_folio() - Attempt to get or pin a folio. +- * @page: pointer to page to be grabbed +- * @refs: the value to (effectively) add to the folio's refcount +- * @flags: gup flags: these are the FOLL_* flag values. +- * +- * "grab" names in this file mean, "look at flags to decide whether to use +- * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount. +- * +- * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the +- * same time. (That's true throughout the get_user_pages*() and +- * pin_user_pages*() APIs.) Cases: +- * +- * FOLL_GET: folio's refcount will be incremented by @refs. +- * +- * FOLL_PIN on large folios: folio's refcount will be incremented by +- * @refs, and its pincount will be incremented by @refs. +- * +- * FOLL_PIN on single-page folios: folio's refcount will be incremented by +- * @refs * GUP_PIN_COUNTING_BIAS. +- * +- * Return: The folio containing @page (with refcount appropriately +- * incremented) for success, or NULL upon failure. If neither FOLL_GET +- * nor FOLL_PIN was set, that's considered failure, and furthermore, +- * a likely bug in the caller, so a warning is also emitted. +- */ +-struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags) +-{ +- struct folio *folio; +- +- if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0)) +- return NULL; +- +- if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page))) +- return NULL; +- +- if (flags & FOLL_GET) +- return try_get_folio(page, refs); +- +- /* FOLL_PIN is set */ +- +- /* +- * Don't take a pin on the zero page - it's not going anywhere +- * and it is used in a *lot* of places. +- */ +- if (is_zero_page(page)) +- return page_folio(page); +- +- folio = try_get_folio(page, refs); +- if (!folio) +- return NULL; +- +- /* +- * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a +- * right zone, so fail and let the caller fall back to the slow +- * path. +- */ +- if (unlikely((flags & FOLL_LONGTERM) && +- !folio_is_longterm_pinnable(folio))) { +- if (!put_devmap_managed_page_refs(&folio->page, refs)) +- folio_put_refs(folio, refs); +- return NULL; +- } +- +- /* +- * When pinning a large folio, use an exact count to track it. +- * +- * However, be sure to *also* increment the normal folio +- * refcount field at least once, so that the folio really +- * is pinned. That's why the refcount from the earlier +- * try_get_folio() is left intact. +- */ +- if (folio_test_large(folio)) +- atomic_add(refs, &folio->_pincount); +- else +- folio_ref_add(folio, +- refs * (GUP_PIN_COUNTING_BIAS - 1)); +- /* +- * Adjust the pincount before re-checking the PTE for changes. +- * This is essentially a smp_mb() and is paired with a memory +- * barrier in page_try_share_anon_rmap(). +- */ +- smp_mb__after_atomic(); +- +- node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs); +- +- return folio; +-} +- + static void gup_put_folio(struct folio *folio, int refs, unsigned int flags) + { + if (flags & FOLL_PIN) { +@@ -203,58 +114,59 @@ static void gup_put_folio(struct folio *folio, int refs, unsigned int flags) + } + + /** +- * try_grab_page() - elevate a page's refcount by a flag-dependent amount +- * @page: pointer to page to be grabbed +- * @flags: gup flags: these are the FOLL_* flag values. 
++ * try_grab_folio() - add a folio's refcount by a flag-dependent amount ++ * @folio: pointer to folio to be grabbed ++ * @refs: the value to (effectively) add to the folio's refcount ++ * @flags: gup flags: these are the FOLL_* flag values + * + * This might not do anything at all, depending on the flags argument. + * + * "grab" names in this file mean, "look at flags to decide whether to use +- * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount. ++ * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount. + * + * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same +- * time. Cases: please see the try_grab_folio() documentation, with +- * "refs=1". ++ * time. + * + * Return: 0 for success, or if no action was required (if neither FOLL_PIN + * nor FOLL_GET was set, nothing is done). A negative error code for failure: + * +- * -ENOMEM FOLL_GET or FOLL_PIN was set, but the page could not ++ * -ENOMEM FOLL_GET or FOLL_PIN was set, but the folio could not + * be grabbed. ++ * ++ * It is called when we have a stable reference for the folio, typically in ++ * GUP slow path. + */ +-int __must_check try_grab_page(struct page *page, unsigned int flags) ++int __must_check try_grab_folio(struct folio *folio, int refs, ++ unsigned int flags) + { +- struct folio *folio = page_folio(page); +- + if (WARN_ON_ONCE(folio_ref_count(folio) <= 0)) + return -ENOMEM; + +- if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page))) ++ if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(&folio->page))) + return -EREMOTEIO; + + if (flags & FOLL_GET) +- folio_ref_inc(folio); ++ folio_ref_add(folio, refs); + else if (flags & FOLL_PIN) { + /* + * Don't take a pin on the zero page - it's not going anywhere + * and it is used in a *lot* of places. + */ +- if (is_zero_page(page)) ++ if (is_zero_folio(folio)) + return 0; + + /* +- * Similar to try_grab_folio(): be sure to *also* +- * increment the normal page refcount field at least once, ++ * Increment the normal page refcount field at least once, + * so that the page really is pinned. + */ + if (folio_test_large(folio)) { +- folio_ref_add(folio, 1); +- atomic_add(1, &folio->_pincount); ++ folio_ref_add(folio, refs); ++ atomic_add(refs, &folio->_pincount); + } else { +- folio_ref_add(folio, GUP_PIN_COUNTING_BIAS); ++ folio_ref_add(folio, refs * GUP_PIN_COUNTING_BIAS); + } + +- node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, 1); ++ node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs); + } + + return 0; +@@ -647,8 +559,8 @@ static struct page *follow_page_pte(struct vm_area_struct *vma, + VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) && + !PageAnonExclusive(page), page); + +- /* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */ +- ret = try_grab_page(page, flags); ++ /* try_grab_folio() does nothing unless FOLL_GET or FOLL_PIN is set. */ ++ ret = try_grab_folio(page_folio(page), 1, flags); + if (unlikely(ret)) { + page = ERR_PTR(ret); + goto out; +@@ -899,7 +811,7 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address, + goto unmap; + *page = pte_page(entry); + } +- ret = try_grab_page(*page, gup_flags); ++ ret = try_grab_folio(page_folio(*page), 1, gup_flags); + if (unlikely(ret)) + goto unmap; + out: +@@ -1302,20 +1214,19 @@ static long __get_user_pages(struct mm_struct *mm, + * pages. 
+ */ + if (page_increm > 1) { +- struct folio *folio; ++ struct folio *folio = page_folio(page); + + /* + * Since we already hold refcount on the + * large folio, this should never fail. + */ +- folio = try_grab_folio(page, page_increm - 1, +- foll_flags); +- if (WARN_ON_ONCE(!folio)) { ++ if (try_grab_folio(folio, page_increm - 1, ++ foll_flags)) { + /* + * Release the 1st page ref if the + * folio is problematic, fail hard. + */ +- gup_put_folio(page_folio(page), 1, ++ gup_put_folio(folio, 1, + foll_flags); + ret = -EFAULT; + goto out; +@@ -2541,6 +2452,102 @@ static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start, + } + } + ++/** ++ * try_grab_folio_fast() - Attempt to get or pin a folio in fast path. ++ * @page: pointer to page to be grabbed ++ * @refs: the value to (effectively) add to the folio's refcount ++ * @flags: gup flags: these are the FOLL_* flag values. ++ * ++ * "grab" names in this file mean, "look at flags to decide whether to use ++ * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount. ++ * ++ * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the ++ * same time. (That's true throughout the get_user_pages*() and ++ * pin_user_pages*() APIs.) Cases: ++ * ++ * FOLL_GET: folio's refcount will be incremented by @refs. ++ * ++ * FOLL_PIN on large folios: folio's refcount will be incremented by ++ * @refs, and its pincount will be incremented by @refs. ++ * ++ * FOLL_PIN on single-page folios: folio's refcount will be incremented by ++ * @refs * GUP_PIN_COUNTING_BIAS. ++ * ++ * Return: The folio containing @page (with refcount appropriately ++ * incremented) for success, or NULL upon failure. If neither FOLL_GET ++ * nor FOLL_PIN was set, that's considered failure, and furthermore, ++ * a likely bug in the caller, so a warning is also emitted. ++ * ++ * It uses add ref unless zero to elevate the folio refcount and must be called ++ * in fast path only. ++ */ ++static struct folio *try_grab_folio_fast(struct page *page, int refs, ++ unsigned int flags) ++{ ++ struct folio *folio; ++ ++ /* Raise warn if it is not called in fast GUP */ ++ VM_WARN_ON_ONCE(!irqs_disabled()); ++ ++ if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0)) ++ return NULL; ++ ++ if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page))) ++ return NULL; ++ ++ if (flags & FOLL_GET) ++ return try_get_folio(page, refs); ++ ++ /* FOLL_PIN is set */ ++ ++ /* ++ * Don't take a pin on the zero page - it's not going anywhere ++ * and it is used in a *lot* of places. ++ */ ++ if (is_zero_page(page)) ++ return page_folio(page); ++ ++ folio = try_get_folio(page, refs); ++ if (!folio) ++ return NULL; ++ ++ /* ++ * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a ++ * right zone, so fail and let the caller fall back to the slow ++ * path. ++ */ ++ if (unlikely((flags & FOLL_LONGTERM) && ++ !folio_is_longterm_pinnable(folio))) { ++ if (!put_devmap_managed_page_refs(&folio->page, refs)) ++ folio_put_refs(folio, refs); ++ return NULL; ++ } ++ ++ /* ++ * When pinning a large folio, use an exact count to track it. ++ * ++ * However, be sure to *also* increment the normal folio ++ * refcount field at least once, so that the folio really ++ * is pinned. That's why the refcount from the earlier ++ * try_get_folio() is left intact. ++ */ ++ if (folio_test_large(folio)) ++ atomic_add(refs, &folio->_pincount); ++ else ++ folio_ref_add(folio, ++ refs * (GUP_PIN_COUNTING_BIAS - 1)); ++ /* ++ * Adjust the pincount before re-checking the PTE for changes. 
++ * This is essentially a smp_mb() and is paired with a memory ++ * barrier in folio_try_share_anon_rmap_*(). ++ */ ++ smp_mb__after_atomic(); ++ ++ node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs); ++ ++ return folio; ++} ++ + #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL + /* + * Fast-gup relies on pte change detection to avoid concurrent pgtable +@@ -2605,7 +2612,7 @@ static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, + VM_BUG_ON(!pfn_valid(pte_pfn(pte))); + page = pte_page(pte); + +- folio = try_grab_folio(page, 1, flags); ++ folio = try_grab_folio_fast(page, 1, flags); + if (!folio) + goto pte_unmap; + +@@ -2699,7 +2706,7 @@ static int __gup_device_huge(unsigned long pfn, unsigned long addr, + + SetPageReferenced(page); + pages[*nr] = page; +- if (unlikely(try_grab_page(page, flags))) { ++ if (unlikely(try_grab_folio(page_folio(page), 1, flags))) { + undo_dev_pagemap(nr, nr_start, flags, pages); + break; + } +@@ -2808,7 +2815,7 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, + page = nth_page(pte_page(pte), (addr & (sz - 1)) >> PAGE_SHIFT); + refs = record_subpages(page, addr, end, pages + *nr); + +- folio = try_grab_folio(page, refs, flags); ++ folio = try_grab_folio_fast(page, refs, flags); + if (!folio) + return 0; + +@@ -2879,7 +2886,7 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, + page = nth_page(pmd_page(orig), (addr & ~PMD_MASK) >> PAGE_SHIFT); + refs = record_subpages(page, addr, end, pages + *nr); + +- folio = try_grab_folio(page, refs, flags); ++ folio = try_grab_folio_fast(page, refs, flags); + if (!folio) + return 0; + +@@ -2923,7 +2930,7 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, + page = nth_page(pud_page(orig), (addr & ~PUD_MASK) >> PAGE_SHIFT); + refs = record_subpages(page, addr, end, pages + *nr); + +- folio = try_grab_folio(page, refs, flags); ++ folio = try_grab_folio_fast(page, refs, flags); + if (!folio) + return 0; + +@@ -2963,7 +2970,7 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, + page = nth_page(pgd_page(orig), (addr & ~PGDIR_MASK) >> PAGE_SHIFT); + refs = record_subpages(page, addr, end, pages + *nr); + +- folio = try_grab_folio(page, refs, flags); ++ folio = try_grab_folio_fast(page, refs, flags); + if (!folio) + return 0; + +diff --git a/mm/huge_memory.c b/mm/huge_memory.c +index 7ac2877e76629b..f2816c9a1f3ec8 100644 +--- a/mm/huge_memory.c ++++ b/mm/huge_memory.c +@@ -1056,7 +1056,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, + if (!*pgmap) + return ERR_PTR(-EFAULT); + page = pfn_to_page(pfn); +- ret = try_grab_page(page, flags); ++ ret = try_grab_folio(page_folio(page), 1, flags); + if (ret) + page = ERR_PTR(ret); + +@@ -1214,7 +1214,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, + return ERR_PTR(-EFAULT); + page = pfn_to_page(pfn); + +- ret = try_grab_page(page, flags); ++ ret = try_grab_folio(page_folio(page), 1, flags); + if (ret) + page = ERR_PTR(ret); + +@@ -1475,7 +1475,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, + VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) && + !PageAnonExclusive(page), page); + +- ret = try_grab_page(page, flags); ++ ret = try_grab_folio(page_folio(page), 1, flags); + if (ret) + return ERR_PTR(ret); + +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index fb7a531fce7174..0acb04c3e95291 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -6532,7 +6532,7 @@ struct page *hugetlb_follow_page_mask(struct 
vm_area_struct *vma, + * try_grab_page() should always be able to get the page here, + * because we hold the ptl lock and have verified pte_present(). + */ +- ret = try_grab_page(page, flags); ++ ret = try_grab_folio(page_folio(page), 1, flags); + + if (WARN_ON_ONCE(ret)) { + page = ERR_PTR(ret); +diff --git a/mm/internal.h b/mm/internal.h +index abed947f784b7b..ef8d787a510c5c 100644 +--- a/mm/internal.h ++++ b/mm/internal.h +@@ -938,8 +938,8 @@ int migrate_device_coherent_page(struct page *page); + /* + * mm/gup.c + */ +-struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags); +-int __must_check try_grab_page(struct page *page, unsigned int flags); ++int __must_check try_grab_folio(struct folio *folio, int refs, ++ unsigned int flags); + + /* + * mm/huge_memory.c +diff --git a/mm/page_table_check.c b/mm/page_table_check.c +index 6363f93a47c691..509c6ef8de400e 100644 +--- a/mm/page_table_check.c ++++ b/mm/page_table_check.c +@@ -7,6 +7,8 @@ + #include + #include + #include ++#include ++#include + + #undef pr_fmt + #define pr_fmt(fmt) "page_table_check: " fmt +@@ -191,6 +193,22 @@ void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud) + } + EXPORT_SYMBOL(__page_table_check_pud_clear); + ++/* Whether the swap entry cached writable information */ ++static inline bool swap_cached_writable(swp_entry_t entry) ++{ ++ return is_writable_device_exclusive_entry(entry) || ++ is_writable_device_private_entry(entry) || ++ is_writable_migration_entry(entry); ++} ++ ++static inline void page_table_check_pte_flags(pte_t pte) ++{ ++ if (pte_present(pte) && pte_uffd_wp(pte)) ++ WARN_ON_ONCE(pte_write(pte)); ++ else if (is_swap_pte(pte) && pte_swp_uffd_wp(pte)) ++ WARN_ON_ONCE(swap_cached_writable(pte_to_swp_entry(pte))); ++} ++ + void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte, + unsigned int nr) + { +@@ -199,6 +217,8 @@ void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte, + if (&init_mm == mm) + return; + ++ page_table_check_pte_flags(pte); ++ + for (i = 0; i < nr; i++) + __page_table_check_pte_clear(mm, ptep_get(ptep + i)); + if (pte_user_accessible_page(pte)) +@@ -206,11 +226,21 @@ void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte, + } + EXPORT_SYMBOL(__page_table_check_ptes_set); + ++static inline void page_table_check_pmd_flags(pmd_t pmd) ++{ ++ if (pmd_present(pmd) && pmd_uffd_wp(pmd)) ++ WARN_ON_ONCE(pmd_write(pmd)); ++ else if (is_swap_pmd(pmd) && pmd_swp_uffd_wp(pmd)) ++ WARN_ON_ONCE(swap_cached_writable(pmd_to_swp_entry(pmd))); ++} ++ + void __page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd) + { + if (&init_mm == mm) + return; + ++ page_table_check_pmd_flags(pmd); ++ + __page_table_check_pmd_clear(mm, *pmdp); + if (pmd_user_accessible_page(pmd)) { + page_table_check_set(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT, +diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c +index b54e8a530f55a1..29aa07e9db9d71 100644 +--- a/net/bluetooth/rfcomm/sock.c ++++ b/net/bluetooth/rfcomm/sock.c +@@ -629,7 +629,7 @@ static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, + + switch (optname) { + case RFCOMM_LM: +- if (copy_from_sockptr(&opt, optval, sizeof(u32))) { ++ if (bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen)) { + err = -EFAULT; + break; + } +@@ -664,7 +664,6 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, + struct sock *sk = sock->sk; + struct bt_security sec; + int err = 0; +- size_t len; + u32 
opt; + + BT_DBG("sk %p", sk); +@@ -686,11 +685,9 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, + + sec.level = BT_SECURITY_LOW; + +- len = min_t(unsigned int, sizeof(sec), optlen); +- if (copy_from_sockptr(&sec, optval, len)) { +- err = -EFAULT; ++ err = bt_copy_from_sockptr(&sec, sizeof(sec), optval, optlen); ++ if (err) + break; +- } + + if (sec.level > BT_SECURITY_HIGH) { + err = -EINVAL; +@@ -706,10 +703,9 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, + break; + } + +- if (copy_from_sockptr(&opt, optval, sizeof(u32))) { +- err = -EFAULT; ++ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen); ++ if (err) + break; +- } + + if (opt) + set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); +diff --git a/net/core/filter.c b/net/core/filter.c +index 8cb44cd29967bb..be313928d272c6 100644 +--- a/net/core/filter.c ++++ b/net/core/filter.c +@@ -2271,12 +2271,12 @@ static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev, + + err = bpf_out_neigh_v6(net, skb, dev, nh); + if (unlikely(net_xmit_eval(err))) +- dev->stats.tx_errors++; ++ DEV_STATS_INC(dev, tx_errors); + else + ret = NET_XMIT_SUCCESS; + goto out_xmit; + out_drop: +- dev->stats.tx_errors++; ++ DEV_STATS_INC(dev, tx_errors); + kfree_skb(skb); + out_xmit: + return ret; +@@ -2378,12 +2378,12 @@ static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev, + + err = bpf_out_neigh_v4(net, skb, dev, nh); + if (unlikely(net_xmit_eval(err))) +- dev->stats.tx_errors++; ++ DEV_STATS_INC(dev, tx_errors); + else + ret = NET_XMIT_SUCCESS; + goto out_xmit; + out_drop: +- dev->stats.tx_errors++; ++ DEV_STATS_INC(dev, tx_errors); + kfree_skb(skb); + out_xmit: + return ret; +diff --git a/net/ipv4/fou_core.c b/net/ipv4/fou_core.c +index 0c41076e31edad..b38b82ae903de0 100644 +--- a/net/ipv4/fou_core.c ++++ b/net/ipv4/fou_core.c +@@ -433,7 +433,7 @@ static struct sk_buff *gue_gro_receive(struct sock *sk, + + offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; + ops = rcu_dereference(offloads[proto]); +- if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive)) ++ if (!ops || !ops->callbacks.gro_receive) + goto out; + + pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); +diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c +index b71f94a5932ac0..e0883ba709b0bf 100644 +--- a/net/ipv4/tcp_metrics.c ++++ b/net/ipv4/tcp_metrics.c +@@ -899,11 +899,13 @@ static void tcp_metrics_flush_all(struct net *net) + unsigned int row; + + for (row = 0; row < max_rows; row++, hb++) { +- struct tcp_metrics_block __rcu **pp; ++ struct tcp_metrics_block __rcu **pp = &hb->chain; + bool match; + ++ if (!rcu_access_pointer(*pp)) ++ continue; ++ + spin_lock_bh(&tcp_metrics_lock); +- pp = &hb->chain; + for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) { + match = net ? 
net_eq(tm_net(tm), net) : + !refcount_read(&tm_net(tm)->ns.count); +@@ -915,6 +917,7 @@ static void tcp_metrics_flush_all(struct net *net) + } + } + spin_unlock_bh(&tcp_metrics_lock); ++ cond_resched(); + } + } + +diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c +index 6e3bfb46af44d3..52b048807feae5 100644 +--- a/net/mac80211/iface.c ++++ b/net/mac80211/iface.c +@@ -251,9 +251,9 @@ static int ieee80211_can_powered_addr_change(struct ieee80211_sub_if_data *sdata + return ret; + } + +-static int ieee80211_change_mac(struct net_device *dev, void *addr) ++static int _ieee80211_change_mac(struct ieee80211_sub_if_data *sdata, ++ void *addr) + { +- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + struct sockaddr *sa = addr; + bool check_dup = true; +@@ -278,7 +278,7 @@ static int ieee80211_change_mac(struct net_device *dev, void *addr) + + if (live) + drv_remove_interface(local, sdata); +- ret = eth_mac_addr(dev, sa); ++ ret = eth_mac_addr(sdata->dev, sa); + + if (ret == 0) { + memcpy(sdata->vif.addr, sa->sa_data, ETH_ALEN); +@@ -294,6 +294,27 @@ static int ieee80211_change_mac(struct net_device *dev, void *addr) + return ret; + } + ++static int ieee80211_change_mac(struct net_device *dev, void *addr) ++{ ++ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); ++ struct ieee80211_local *local = sdata->local; ++ int ret; ++ ++ /* ++ * This happens during unregistration if there's a bond device ++ * active (maybe other cases?) and we must get removed from it. ++ * But we really don't care anymore if it's not registered now. ++ */ ++ if (!dev->ieee80211_ptr->registered) ++ return 0; ++ ++ wiphy_lock(local->hw.wiphy); ++ ret = _ieee80211_change_mac(sdata, addr); ++ wiphy_unlock(local->hw.wiphy); ++ ++ return ret; ++} ++ + static inline int identical_mac_addr_allowed(int type1, int type2) + { + return type1 == NL80211_IFTYPE_MONITOR || +diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c +index 819157bbb5a2c6..d5344563e525c9 100644 +--- a/net/nfc/llcp_sock.c ++++ b/net/nfc/llcp_sock.c +@@ -252,10 +252,10 @@ static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname, + break; + } + +- if (copy_from_sockptr(&opt, optval, sizeof(u32))) { +- err = -EFAULT; ++ err = copy_safe_from_sockptr(&opt, sizeof(opt), ++ optval, optlen); ++ if (err) + break; +- } + + if (opt > LLCP_MAX_RW) { + err = -EINVAL; +@@ -274,10 +274,10 @@ static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname, + break; + } + +- if (copy_from_sockptr(&opt, optval, sizeof(u32))) { +- err = -EFAULT; ++ err = copy_safe_from_sockptr(&opt, sizeof(opt), ++ optval, optlen); ++ if (err) + break; +- } + + if (opt > LLCP_MAX_MIUX) { + err = -EINVAL; +diff --git a/net/rds/recv.c b/net/rds/recv.c +index c71b923764fd7c..5627f80013f8b1 100644 +--- a/net/rds/recv.c ++++ b/net/rds/recv.c +@@ -425,6 +425,7 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc, + struct sock *sk = rds_rs_to_sk(rs); + int ret = 0; + unsigned long flags; ++ struct rds_incoming *to_drop = NULL; + + write_lock_irqsave(&rs->rs_recv_lock, flags); + if (!list_empty(&inc->i_item)) { +@@ -435,11 +436,14 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc, + -be32_to_cpu(inc->i_hdr.h_len), + inc->i_hdr.h_dport); + list_del_init(&inc->i_item); +- rds_inc_put(inc); ++ to_drop = inc; + } + } + write_unlock_irqrestore(&rs->rs_recv_lock, flags); + ++ if (to_drop) ++ rds_inc_put(to_drop); ++ + rdsdebug("inc %p rs %p 
still %d dropped %d\n", inc, rs, ret, drop); + return ret; + } +@@ -758,16 +762,21 @@ void rds_clear_recv_queue(struct rds_sock *rs) + struct sock *sk = rds_rs_to_sk(rs); + struct rds_incoming *inc, *tmp; + unsigned long flags; ++ LIST_HEAD(to_drop); + + write_lock_irqsave(&rs->rs_recv_lock, flags); + list_for_each_entry_safe(inc, tmp, &rs->rs_recv_queue, i_item) { + rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong, + -be32_to_cpu(inc->i_hdr.h_len), + inc->i_hdr.h_dport); ++ list_move(&inc->i_item, &to_drop); ++ } ++ write_unlock_irqrestore(&rs->rs_recv_lock, flags); ++ ++ list_for_each_entry_safe(inc, tmp, &to_drop, i_item) { + list_del_init(&inc->i_item); + rds_inc_put(inc); + } +- write_unlock_irqrestore(&rs->rs_recv_lock, flags); + } + + /* +diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c +index 4023c955036b12..6ab9359c1706f1 100644 +--- a/net/sched/sch_generic.c ++++ b/net/sched/sch_generic.c +@@ -522,8 +522,9 @@ static void dev_watchdog(struct timer_list *t) + + if (unlikely(timedout_ms)) { + trace_net_dev_xmit_timeout(dev, i); +- WARN_ONCE(1, "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out %u ms\n", +- dev->name, netdev_drivername(dev), i, timedout_ms); ++ netdev_crit(dev, "NETDEV WATCHDOG: CPU: %d: transmit queue %u timed out %u ms\n", ++ raw_smp_processor_id(), ++ i, timedout_ms); + netif_freeze_queues(dev); + dev->netdev_ops->ndo_tx_timeout(dev, i); + netif_unfreeze_queues(dev); +diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c +index 7182c5a450fb5b..5c165218180588 100644 +--- a/net/sctp/inqueue.c ++++ b/net/sctp/inqueue.c +@@ -38,6 +38,14 @@ void sctp_inq_init(struct sctp_inq *queue) + INIT_WORK(&queue->immediate, NULL); + } + ++/* Properly release the chunk which is being worked on. */ ++static inline void sctp_inq_chunk_free(struct sctp_chunk *chunk) ++{ ++ if (chunk->head_skb) ++ chunk->skb = chunk->head_skb; ++ sctp_chunk_free(chunk); ++} ++ + /* Release the memory associated with an SCTP inqueue. */ + void sctp_inq_free(struct sctp_inq *queue) + { +@@ -53,7 +61,7 @@ void sctp_inq_free(struct sctp_inq *queue) + * free it as well. + */ + if (queue->in_progress) { +- sctp_chunk_free(queue->in_progress); ++ sctp_inq_chunk_free(queue->in_progress); + queue->in_progress = NULL; + } + } +@@ -130,9 +138,7 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue) + goto new_skb; + } + +- if (chunk->head_skb) +- chunk->skb = chunk->head_skb; +- sctp_chunk_free(chunk); ++ sctp_inq_chunk_free(chunk); + chunk = queue->in_progress = NULL; + } else { + /* Nothing to do. Next chunk in the packet, please. 
*/ +diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c +index 65fc1297c6dfa4..383860cb1d5b0f 100644 +--- a/net/sunrpc/stats.c ++++ b/net/sunrpc/stats.c +@@ -314,7 +314,7 @@ EXPORT_SYMBOL_GPL(rpc_proc_unregister); + struct proc_dir_entry * + svc_proc_register(struct net *net, struct svc_stat *statp, const struct proc_ops *proc_ops) + { +- return do_register(net, statp->program->pg_name, statp, proc_ops); ++ return do_register(net, statp->program->pg_name, net, proc_ops); + } + EXPORT_SYMBOL_GPL(svc_proc_register); + +diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c +index 691499d1d2315c..029c49065016ac 100644 +--- a/net/sunrpc/svc.c ++++ b/net/sunrpc/svc.c +@@ -453,8 +453,8 @@ __svc_init_bc(struct svc_serv *serv) + * Create an RPC service + */ + static struct svc_serv * +-__svc_create(struct svc_program *prog, unsigned int bufsize, int npools, +- int (*threadfn)(void *data)) ++__svc_create(struct svc_program *prog, struct svc_stat *stats, ++ unsigned int bufsize, int npools, int (*threadfn)(void *data)) + { + struct svc_serv *serv; + unsigned int vers; +@@ -466,7 +466,7 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, + serv->sv_name = prog->pg_name; + serv->sv_program = prog; + kref_init(&serv->sv_refcnt); +- serv->sv_stats = prog->pg_stats; ++ serv->sv_stats = stats; + if (bufsize > RPCSVC_MAXPAYLOAD) + bufsize = RPCSVC_MAXPAYLOAD; + serv->sv_max_payload = bufsize? bufsize : 4096; +@@ -532,26 +532,28 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, + struct svc_serv *svc_create(struct svc_program *prog, unsigned int bufsize, + int (*threadfn)(void *data)) + { +- return __svc_create(prog, bufsize, 1, threadfn); ++ return __svc_create(prog, NULL, bufsize, 1, threadfn); + } + EXPORT_SYMBOL_GPL(svc_create); + + /** + * svc_create_pooled - Create an RPC service with pooled threads + * @prog: the RPC program the new service will handle ++ * @stats: the stats struct if desired + * @bufsize: maximum message size for @prog + * @threadfn: a function to service RPC requests for @prog + * + * Returns an instantiated struct svc_serv object or NULL. 
+ */ + struct svc_serv *svc_create_pooled(struct svc_program *prog, ++ struct svc_stat *stats, + unsigned int bufsize, + int (*threadfn)(void *data)) + { + struct svc_serv *serv; + unsigned int npools = svc_pool_map_get(); + +- serv = __svc_create(prog, bufsize, npools, threadfn); ++ serv = __svc_create(prog, stats, bufsize, npools, threadfn); + if (!serv) + goto out_err; + return serv; +@@ -1377,7 +1379,8 @@ svc_process_common(struct svc_rqst *rqstp) + goto err_bad_proc; + + /* Syntactic check complete */ +- serv->sv_stats->rpccnt++; ++ if (serv->sv_stats) ++ serv->sv_stats->rpccnt++; + trace_svc_process(rqstp, progp->pg_name); + + aoffset = xdr_stream_pos(xdr); +@@ -1429,7 +1432,8 @@ svc_process_common(struct svc_rqst *rqstp) + goto close_xprt; + + err_bad_rpc: +- serv->sv_stats->rpcbadfmt++; ++ if (serv->sv_stats) ++ serv->sv_stats->rpcbadfmt++; + xdr_stream_encode_u32(xdr, RPC_MSG_DENIED); + xdr_stream_encode_u32(xdr, RPC_MISMATCH); + /* Only RPCv2 supported */ +@@ -1440,7 +1444,8 @@ svc_process_common(struct svc_rqst *rqstp) + err_bad_auth: + dprintk("svc: authentication failed (%d)\n", + be32_to_cpu(rqstp->rq_auth_stat)); +- serv->sv_stats->rpcbadauth++; ++ if (serv->sv_stats) ++ serv->sv_stats->rpcbadauth++; + /* Restore write pointer to location of reply status: */ + xdr_truncate_encode(xdr, XDR_UNIT * 2); + xdr_stream_encode_u32(xdr, RPC_MSG_DENIED); +@@ -1450,7 +1455,8 @@ svc_process_common(struct svc_rqst *rqstp) + + err_bad_prog: + dprintk("svc: unknown program %d\n", rqstp->rq_prog); +- serv->sv_stats->rpcbadfmt++; ++ if (serv->sv_stats) ++ serv->sv_stats->rpcbadfmt++; + *rqstp->rq_accept_statp = rpc_prog_unavail; + goto sendit; + +@@ -1458,7 +1464,8 @@ svc_process_common(struct svc_rqst *rqstp) + svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n", + rqstp->rq_vers, rqstp->rq_prog, progp->pg_name); + +- serv->sv_stats->rpcbadfmt++; ++ if (serv->sv_stats) ++ serv->sv_stats->rpcbadfmt++; + *rqstp->rq_accept_statp = rpc_prog_mismatch; + + /* +@@ -1472,19 +1479,22 @@ svc_process_common(struct svc_rqst *rqstp) + err_bad_proc: + svc_printk(rqstp, "unknown procedure (%d)\n", rqstp->rq_proc); + +- serv->sv_stats->rpcbadfmt++; ++ if (serv->sv_stats) ++ serv->sv_stats->rpcbadfmt++; + *rqstp->rq_accept_statp = rpc_proc_unavail; + goto sendit; + + err_garbage_args: + svc_printk(rqstp, "failed to decode RPC header\n"); + +- serv->sv_stats->rpcbadfmt++; ++ if (serv->sv_stats) ++ serv->sv_stats->rpcbadfmt++; + *rqstp->rq_accept_statp = rpc_garbage_args; + goto sendit; + + err_system_err: +- serv->sv_stats->rpcbadfmt++; ++ if (serv->sv_stats) ++ serv->sv_stats->rpcbadfmt++; + *rqstp->rq_accept_statp = rpc_system_err; + goto sendit; + } +@@ -1536,7 +1546,8 @@ void svc_process(struct svc_rqst *rqstp) + out_baddir: + svc_printk(rqstp, "bad direction 0x%08x, dropping request\n", + be32_to_cpu(*p)); +- rqstp->rq_server->sv_stats->rpcbadfmt++; ++ if (rqstp->rq_server->sv_stats) ++ rqstp->rq_server->sv_stats->rpcbadfmt++; + out_drop: + svc_drop(rqstp); + } +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c +index be5c42d6ffbeab..2b2dc46dc701f9 100644 +--- a/net/wireless/nl80211.c ++++ b/net/wireless/nl80211.c +@@ -468,6 +468,10 @@ static struct netlink_range_validation nl80211_punct_bitmap_range = { + .max = 0xffff, + }; + ++static struct netlink_range_validation q_range = { ++ .max = INT_MAX, ++}; ++ + static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { + [0] = { .strict_start_type = NL80211_ATTR_HE_OBSS_PD }, + [NL80211_ATTR_WIPHY] = { .type = NLA_U32 }, +@@ 
-750,7 +754,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { + + [NL80211_ATTR_TXQ_LIMIT] = { .type = NLA_U32 }, + [NL80211_ATTR_TXQ_MEMORY_LIMIT] = { .type = NLA_U32 }, +- [NL80211_ATTR_TXQ_QUANTUM] = { .type = NLA_U32 }, ++ [NL80211_ATTR_TXQ_QUANTUM] = NLA_POLICY_FULL_RANGE(NLA_U32, &q_range), + [NL80211_ATTR_HE_CAPABILITY] = + NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_he_capa, + NL80211_HE_MAX_CAPABILITY_LEN), +diff --git a/samples/bpf/map_perf_test_user.c b/samples/bpf/map_perf_test_user.c +index d2fbcf963cdf6d..07ff471ed6aee0 100644 +--- a/samples/bpf/map_perf_test_user.c ++++ b/samples/bpf/map_perf_test_user.c +@@ -370,7 +370,7 @@ static void run_perf_test(int tasks) + + static void fill_lpm_trie(void) + { +- struct bpf_lpm_trie_key *key; ++ struct bpf_lpm_trie_key_u8 *key; + unsigned long value = 0; + unsigned int i; + int r; +diff --git a/samples/bpf/xdp_router_ipv4_user.c b/samples/bpf/xdp_router_ipv4_user.c +index 9d41db09c4800f..266fdd0b025dc6 100644 +--- a/samples/bpf/xdp_router_ipv4_user.c ++++ b/samples/bpf/xdp_router_ipv4_user.c +@@ -91,7 +91,7 @@ static int recv_msg(struct sockaddr_nl sock_addr, int sock) + static void read_route(struct nlmsghdr *nh, int nll) + { + char dsts[24], gws[24], ifs[16], dsts_len[24], metrics[24]; +- struct bpf_lpm_trie_key *prefix_key; ++ struct bpf_lpm_trie_key_u8 *prefix_key; + struct rtattr *rt_attr; + struct rtmsg *rt_msg; + int rtm_family; +diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c +index 8b58a7864703ee..7e8fca0b066280 100644 +--- a/sound/soc/soc-topology.c ++++ b/sound/soc/soc-topology.c +@@ -1021,6 +1021,7 @@ static int soc_tplg_dapm_graph_elems_load(struct soc_tplg *tplg, + struct snd_soc_tplg_hdr *hdr) + { + struct snd_soc_dapm_context *dapm = &tplg->comp->dapm; ++ const size_t maxlen = SNDRV_CTL_ELEM_ID_NAME_MAXLEN; + struct snd_soc_tplg_dapm_graph_elem *elem; + struct snd_soc_dapm_route *route; + int count, i; +@@ -1044,39 +1045,22 @@ static int soc_tplg_dapm_graph_elems_load(struct soc_tplg *tplg, + tplg->pos += sizeof(struct snd_soc_tplg_dapm_graph_elem); + + /* validate routes */ +- if (strnlen(elem->source, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) == +- SNDRV_CTL_ELEM_ID_NAME_MAXLEN) { +- ret = -EINVAL; +- break; +- } +- if (strnlen(elem->sink, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) == +- SNDRV_CTL_ELEM_ID_NAME_MAXLEN) { +- ret = -EINVAL; +- break; +- } +- if (strnlen(elem->control, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) == +- SNDRV_CTL_ELEM_ID_NAME_MAXLEN) { ++ if ((strnlen(elem->source, maxlen) == maxlen) || ++ (strnlen(elem->sink, maxlen) == maxlen) || ++ (strnlen(elem->control, maxlen) == maxlen)) { + ret = -EINVAL; + break; + } + +- route->source = devm_kmemdup(tplg->dev, elem->source, +- min(strlen(elem->source), +- SNDRV_CTL_ELEM_ID_NAME_MAXLEN), +- GFP_KERNEL); +- route->sink = devm_kmemdup(tplg->dev, elem->sink, +- min(strlen(elem->sink), SNDRV_CTL_ELEM_ID_NAME_MAXLEN), +- GFP_KERNEL); ++ route->source = devm_kstrdup(tplg->dev, elem->source, GFP_KERNEL); ++ route->sink = devm_kstrdup(tplg->dev, elem->sink, GFP_KERNEL); + if (!route->source || !route->sink) { + ret = -ENOMEM; + break; + } + +- if (strnlen(elem->control, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) != 0) { +- route->control = devm_kmemdup(tplg->dev, elem->control, +- min(strlen(elem->control), +- SNDRV_CTL_ELEM_ID_NAME_MAXLEN), +- GFP_KERNEL); ++ if (strnlen(elem->control, maxlen) != 0) { ++ route->control = devm_kstrdup(tplg->dev, elem->control, GFP_KERNEL); + if (!route->control) { + ret = -ENOMEM; + break; +diff --git a/sound/usb/mixer.c 
b/sound/usb/mixer.c +index d1bdb0b93bda0c..8cc2d4937f3403 100644 +--- a/sound/usb/mixer.c ++++ b/sound/usb/mixer.c +@@ -2021,6 +2021,13 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, + bmaControls = ftr->bmaControls; + } + ++ if (channels > 32) { ++ usb_audio_info(state->chip, ++ "usbmixer: too many channels (%d) in unit %d\n", ++ channels, unitid); ++ return -EINVAL; ++ } ++ + /* parse the source unit */ + err = parse_audio_unit(state, hdr->bSourceID); + if (err < 0) +diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h +index fb09fd1767f289..ba6e346c8d669a 100644 +--- a/tools/include/uapi/linux/bpf.h ++++ b/tools/include/uapi/linux/bpf.h +@@ -77,12 +77,29 @@ struct bpf_insn { + __s32 imm; /* signed immediate constant */ + }; + +-/* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */ ++/* Deprecated: use struct bpf_lpm_trie_key_u8 (when the "data" member is needed for ++ * byte access) or struct bpf_lpm_trie_key_hdr (when using an alternative type for ++ * the trailing flexible array member) instead. ++ */ + struct bpf_lpm_trie_key { + __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */ + __u8 data[0]; /* Arbitrary size */ + }; + ++/* Header for bpf_lpm_trie_key structs */ ++struct bpf_lpm_trie_key_hdr { ++ __u32 prefixlen; ++}; ++ ++/* Key of an a BPF_MAP_TYPE_LPM_TRIE entry, with trailing byte array. */ ++struct bpf_lpm_trie_key_u8 { ++ union { ++ struct bpf_lpm_trie_key_hdr hdr; ++ __u32 prefixlen; ++ }; ++ __u8 data[]; /* Arbitrary size */ ++}; ++ + struct bpf_cgroup_storage_key { + __u64 cgroup_inode_id; /* cgroup inode id */ + __u32 attach_type; /* program attach type (enum bpf_attach_type) */ +diff --git a/tools/testing/selftests/bpf/progs/map_ptr_kern.c b/tools/testing/selftests/bpf/progs/map_ptr_kern.c +index 3325da17ec81af..efaf622c28ddec 100644 +--- a/tools/testing/selftests/bpf/progs/map_ptr_kern.c ++++ b/tools/testing/selftests/bpf/progs/map_ptr_kern.c +@@ -316,7 +316,7 @@ struct lpm_trie { + } __attribute__((preserve_access_index)); + + struct lpm_key { +- struct bpf_lpm_trie_key trie_key; ++ struct bpf_lpm_trie_key_hdr trie_key; + __u32 data; + }; + +diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c +index c028d621c744da..d98c72dc563eaf 100644 +--- a/tools/testing/selftests/bpf/test_lpm_map.c ++++ b/tools/testing/selftests/bpf/test_lpm_map.c +@@ -211,7 +211,7 @@ static void test_lpm_map(int keysize) + volatile size_t n_matches, n_matches_after_delete; + size_t i, j, n_nodes, n_lookups; + struct tlpm_node *t, *list = NULL; +- struct bpf_lpm_trie_key *key; ++ struct bpf_lpm_trie_key_u8 *key; + uint8_t *data, *value; + int r, map; + +@@ -331,8 +331,8 @@ static void test_lpm_map(int keysize) + static void test_lpm_ipaddr(void) + { + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC); +- struct bpf_lpm_trie_key *key_ipv4; +- struct bpf_lpm_trie_key *key_ipv6; ++ struct bpf_lpm_trie_key_u8 *key_ipv4; ++ struct bpf_lpm_trie_key_u8 *key_ipv6; + size_t key_size_ipv4; + size_t key_size_ipv6; + int map_fd_ipv4; +@@ -423,7 +423,7 @@ static void test_lpm_ipaddr(void) + static void test_lpm_delete(void) + { + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC); +- struct bpf_lpm_trie_key *key; ++ struct bpf_lpm_trie_key_u8 *key; + size_t key_size; + int map_fd; + __u64 value; +@@ -532,7 +532,7 @@ static void test_lpm_delete(void) + static void test_lpm_get_next_key(void) + { + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = 
BPF_F_NO_PREALLOC); +- struct bpf_lpm_trie_key *key_p, *next_key_p; ++ struct bpf_lpm_trie_key_u8 *key_p, *next_key_p; + size_t key_size; + __u32 value = 0; + int map_fd; +@@ -693,9 +693,9 @@ static void *lpm_test_command(void *arg) + { + int i, j, ret, iter, key_size; + struct lpm_mt_test_info *info = arg; +- struct bpf_lpm_trie_key *key_p; ++ struct bpf_lpm_trie_key_u8 *key_p; + +- key_size = sizeof(struct bpf_lpm_trie_key) + sizeof(__u32); ++ key_size = sizeof(*key_p) + sizeof(__u32); + key_p = alloca(key_size); + for (iter = 0; iter < info->iter; iter++) + for (i = 0; i < MAX_TEST_KEYS; i++) { +@@ -717,7 +717,7 @@ static void *lpm_test_command(void *arg) + ret = bpf_map_lookup_elem(info->map_fd, key_p, &value); + assert(ret == 0 || errno == ENOENT); + } else { +- struct bpf_lpm_trie_key *next_key_p = alloca(key_size); ++ struct bpf_lpm_trie_key_u8 *next_key_p = alloca(key_size); + ret = bpf_map_get_next_key(info->map_fd, key_p, next_key_p); + assert(ret == 0 || errno == ENOENT || errno == ENOMEM); + } +@@ -752,7 +752,7 @@ static void test_lpm_multi_thread(void) + + /* create a trie */ + value_size = sizeof(__u32); +- key_size = sizeof(struct bpf_lpm_trie_key) + value_size; ++ key_size = sizeof(struct bpf_lpm_trie_key_hdr) + value_size; + map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, key_size, value_size, 100, &opts); + + /* create 4 threads to test update, delete, lookup and get_next_key */ +diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c +index ad993ab3ac1819..bc36c91c4480f5 100644 +--- a/tools/testing/selftests/net/tls.c ++++ b/tools/testing/selftests/net/tls.c +@@ -707,6 +707,20 @@ TEST_F(tls, splice_from_pipe) + EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0); + } + ++TEST_F(tls, splice_more) ++{ ++ unsigned int f = SPLICE_F_NONBLOCK | SPLICE_F_MORE | SPLICE_F_GIFT; ++ int send_len = TLS_PAYLOAD_MAX_LEN; ++ char mem_send[TLS_PAYLOAD_MAX_LEN]; ++ int i, send_pipe = 1; ++ int p[2]; ++ ++ ASSERT_GE(pipe(p), 0); ++ EXPECT_GE(write(p[1], mem_send, send_len), 0); ++ for (i = 0; i < 32; i++) ++ EXPECT_EQ(splice(p[0], NULL, self->fd, NULL, send_pipe, f), 1); ++} ++ + TEST_F(tls, splice_from_pipe2) + { + int send_len = 16000; diff --git a/patch/kernel/archive/odroidxu4-6.6/patch-6.6.47-48.patch b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.47-48.patch new file mode 100644 index 000000000000..a61ad39977d3 --- /dev/null +++ b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.47-48.patch @@ -0,0 +1,14889 @@ +diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu +index 34b6f6ab47422..657bdee28d845 100644 +--- a/Documentation/ABI/testing/sysfs-devices-system-cpu ++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu +@@ -565,7 +565,8 @@ Description: Control Symmetric Multi Threading (SMT) + ================ ========================================= + + If control status is "forceoff" or "notsupported" writes +- are rejected. ++ are rejected. Note that enabling SMT on PowerPC skips ++ offline cores. 
+ + What: /sys/devices/system/cpu/cpuX/power/energy_perf_bias + Date: March 2019 +diff --git a/Makefile b/Makefile +index 6b967e135c80f..cef1e15ad7606 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 6 +-SUBLEVEL = 47 ++SUBLEVEL = 48 + EXTRAVERSION = + NAME = Hurr durr I'ma ninja sloth + +diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c +index c81183935e970..7fcf3e9b71030 100644 +--- a/arch/alpha/kernel/pci_iommu.c ++++ b/arch/alpha/kernel/pci_iommu.c +@@ -929,7 +929,7 @@ const struct dma_map_ops alpha_pci_ops = { + .dma_supported = alpha_pci_supported, + .mmap = dma_common_mmap, + .get_sgtable = dma_common_get_sgtable, +- .alloc_pages = dma_common_alloc_pages, ++ .alloc_pages_op = dma_common_alloc_pages, + .free_pages = dma_common_free_pages, + }; + EXPORT_SYMBOL(alpha_pci_ops); +diff --git a/arch/arm64/kernel/acpi_numa.c b/arch/arm64/kernel/acpi_numa.c +index e51535a5f939a..ccbff21ce1faf 100644 +--- a/arch/arm64/kernel/acpi_numa.c ++++ b/arch/arm64/kernel/acpi_numa.c +@@ -27,7 +27,7 @@ + + #include + +-static int acpi_early_node_map[NR_CPUS] __initdata = { NUMA_NO_NODE }; ++static int acpi_early_node_map[NR_CPUS] __initdata = { [0 ... NR_CPUS - 1] = NUMA_NO_NODE }; + + int __init acpi_numa_get_nid(unsigned int cpu) + { +diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c +index 417a8a86b2db5..c583d1f335f8c 100644 +--- a/arch/arm64/kernel/setup.c ++++ b/arch/arm64/kernel/setup.c +@@ -371,9 +371,6 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p) + smp_init_cpus(); + smp_build_mpidr_hash(); + +- /* Init percpu seeds for random tags after cpus are set up. */ +- kasan_init_sw_tags(); +- + #ifdef CONFIG_ARM64_SW_TTBR0_PAN + /* + * Make sure init_thread_info.ttbr0 always generates translation +diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c +index 960b98b43506d..14365ef842440 100644 +--- a/arch/arm64/kernel/smp.c ++++ b/arch/arm64/kernel/smp.c +@@ -459,6 +459,8 @@ void __init smp_prepare_boot_cpu(void) + init_gic_priority_masking(); + + kasan_init_hw_tags(); ++ /* Init percpu seeds for random tags after cpus are set up. 
*/ ++ kasan_init_sw_tags(); + } + + /* +diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c +index 0afd6136e2759..b233a64df2956 100644 +--- a/arch/arm64/kvm/sys_regs.c ++++ b/arch/arm64/kvm/sys_regs.c +@@ -32,6 +32,7 @@ + #include + + #include "sys_regs.h" ++#include "vgic/vgic.h" + + #include "trace.h" + +@@ -301,6 +302,11 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu, + { + bool g1; + ++ if (!kvm_has_gicv3(vcpu->kvm)) { ++ kvm_inject_undefined(vcpu); ++ return false; ++ } ++ + if (!p->is_write) + return read_from_write_only(vcpu, p, r); + +diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h +index fae2618da720b..07e48f8a4f23b 100644 +--- a/arch/arm64/kvm/vgic/vgic.h ++++ b/arch/arm64/kvm/vgic/vgic.h +@@ -343,4 +343,11 @@ void vgic_v4_configure_vsgis(struct kvm *kvm); + void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val); + int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq); + ++static inline bool kvm_has_gicv3(struct kvm *kvm) ++{ ++ return (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) && ++ irqchip_in_kernel(kvm) && ++ kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3); ++} ++ + #endif +diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c +index eabddb89d221f..c97b089b99029 100644 +--- a/arch/mips/jazz/jazzdma.c ++++ b/arch/mips/jazz/jazzdma.c +@@ -617,7 +617,7 @@ const struct dma_map_ops jazz_dma_ops = { + .sync_sg_for_device = jazz_dma_sync_sg_for_device, + .mmap = dma_common_mmap, + .get_sgtable = dma_common_get_sgtable, +- .alloc_pages = dma_common_alloc_pages, ++ .alloc_pages_op = dma_common_alloc_pages, + .free_pages = dma_common_free_pages, + }; + EXPORT_SYMBOL(jazz_dma_ops); +diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c +index b406d8bfb15a3..c7fee72ea6067 100644 +--- a/arch/mips/kernel/cpu-probe.c ++++ b/arch/mips/kernel/cpu-probe.c +@@ -1725,12 +1725,16 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) + c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM | + MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2); + c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */ ++ change_c0_config6(LOONGSON_CONF6_EXTIMER | LOONGSON_CONF6_INTIMER, ++ LOONGSON_CONF6_INTIMER); + break; + case PRID_IMP_LOONGSON_64G: + __cpu_name[cpu] = "ICT Loongson-3"; + set_elf_platform(cpu, "loongson3a"); + set_isa(c, MIPS_CPU_ISA_M64R2); + decode_cpucfg(c); ++ change_c0_config6(LOONGSON_CONF6_EXTIMER | LOONGSON_CONF6_INTIMER, ++ LOONGSON_CONF6_INTIMER); + break; + default: + panic("Unknown Loongson Processor ID!"); +diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c +index 9cf7fb60441f8..be56eaafc8b95 100644 +--- a/arch/openrisc/kernel/setup.c ++++ b/arch/openrisc/kernel/setup.c +@@ -255,6 +255,9 @@ void calibrate_delay(void) + + void __init setup_arch(char **cmdline_p) + { ++ /* setup memblock allocator */ ++ setup_memory(); ++ + unflatten_and_copy_device_tree(); + + setup_cpuinfo(); +@@ -278,9 +281,6 @@ void __init setup_arch(char **cmdline_p) + } + #endif + +- /* setup memblock allocator */ +- setup_memory(); +- + /* paging_init() sets up the MMU and marks all pages as reserved */ + paging_init(); + +diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c +index 2f81bfd4f15e1..dff66be65d290 100644 +--- a/arch/parisc/kernel/irq.c ++++ b/arch/parisc/kernel/irq.c +@@ -498,7 +498,7 @@ asmlinkage void do_cpu_irq_mask(struct pt_regs *regs) + + old_regs = set_irq_regs(regs); + local_irq_disable(); +- irq_enter(); ++ 
irq_enter_rcu(); + + eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu); + if (!eirr_val) +@@ -533,7 +533,7 @@ asmlinkage void do_cpu_irq_mask(struct pt_regs *regs) + #endif /* CONFIG_IRQSTACKS */ + + out: +- irq_exit(); ++ irq_exit_rcu(); + set_irq_regs(old_regs); + return; + +diff --git a/arch/powerpc/boot/simple_alloc.c b/arch/powerpc/boot/simple_alloc.c +index 267d6524caac4..d07796fdf91aa 100644 +--- a/arch/powerpc/boot/simple_alloc.c ++++ b/arch/powerpc/boot/simple_alloc.c +@@ -112,8 +112,11 @@ static void *simple_realloc(void *ptr, unsigned long size) + return ptr; + + new = simple_malloc(size); +- memcpy(new, ptr, p->size); +- simple_free(ptr); ++ if (new) { ++ memcpy(new, ptr, p->size); ++ simple_free(ptr); ++ } ++ + return new; + } + +diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h +index f4e6f2dd04b73..16bacfe8c7a2c 100644 +--- a/arch/powerpc/include/asm/topology.h ++++ b/arch/powerpc/include/asm/topology.h +@@ -145,6 +145,7 @@ static inline int cpu_to_coregroup_id(int cpu) + + #ifdef CONFIG_HOTPLUG_SMT + #include ++#include + #include + + static inline bool topology_is_primary_thread(unsigned int cpu) +@@ -156,6 +157,18 @@ static inline bool topology_smt_thread_allowed(unsigned int cpu) + { + return cpu_thread_in_core(cpu) < cpu_smt_num_threads; + } ++ ++#define topology_is_core_online topology_is_core_online ++static inline bool topology_is_core_online(unsigned int cpu) ++{ ++ int i, first_cpu = cpu_first_thread_sibling(cpu); ++ ++ for (i = first_cpu; i < first_cpu + threads_per_core; ++i) { ++ if (cpu_online(i)) ++ return true; ++ } ++ return false; ++} + #endif + + #endif /* __KERNEL__ */ +diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c +index 8920862ffd791..f0ae39e77e374 100644 +--- a/arch/powerpc/kernel/dma-iommu.c ++++ b/arch/powerpc/kernel/dma-iommu.c +@@ -216,6 +216,6 @@ const struct dma_map_ops dma_iommu_ops = { + .get_required_mask = dma_iommu_get_required_mask, + .mmap = dma_common_mmap, + .get_sgtable = dma_common_get_sgtable, +- .alloc_pages = dma_common_alloc_pages, ++ .alloc_pages_op = dma_common_alloc_pages, + .free_pages = dma_common_free_pages, + }; +diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c +index d6b5f5ecd5152..56dc6b29a3e76 100644 +--- a/arch/powerpc/platforms/ps3/system-bus.c ++++ b/arch/powerpc/platforms/ps3/system-bus.c +@@ -695,7 +695,7 @@ static const struct dma_map_ops ps3_sb_dma_ops = { + .unmap_page = ps3_unmap_page, + .mmap = dma_common_mmap, + .get_sgtable = dma_common_get_sgtable, +- .alloc_pages = dma_common_alloc_pages, ++ .alloc_pages_op = dma_common_alloc_pages, + .free_pages = dma_common_free_pages, + }; + +@@ -709,7 +709,7 @@ static const struct dma_map_ops ps3_ioc0_dma_ops = { + .unmap_page = ps3_unmap_page, + .mmap = dma_common_mmap, + .get_sgtable = dma_common_get_sgtable, +- .alloc_pages = dma_common_alloc_pages, ++ .alloc_pages_op = dma_common_alloc_pages, + .free_pages = dma_common_free_pages, + }; + +diff --git a/arch/powerpc/platforms/pseries/papr-sysparm.c b/arch/powerpc/platforms/pseries/papr-sysparm.c +index fedc61599e6cc..a1e7aeac74161 100644 +--- a/arch/powerpc/platforms/pseries/papr-sysparm.c ++++ b/arch/powerpc/platforms/pseries/papr-sysparm.c +@@ -23,6 +23,46 @@ void papr_sysparm_buf_free(struct papr_sysparm_buf *buf) + kfree(buf); + } + ++static size_t papr_sysparm_buf_get_length(const struct papr_sysparm_buf *buf) ++{ ++ return be16_to_cpu(buf->len); ++} ++ ++static void 
papr_sysparm_buf_set_length(struct papr_sysparm_buf *buf, size_t length) ++{ ++ WARN_ONCE(length > sizeof(buf->val), ++ "bogus length %zu, clamping to safe value", length); ++ length = min(sizeof(buf->val), length); ++ buf->len = cpu_to_be16(length); ++} ++ ++/* ++ * For use on buffers returned from ibm,get-system-parameter before ++ * returning them to callers. Ensures the encoded length of valid data ++ * cannot overrun buf->val[]. ++ */ ++static void papr_sysparm_buf_clamp_length(struct papr_sysparm_buf *buf) ++{ ++ papr_sysparm_buf_set_length(buf, papr_sysparm_buf_get_length(buf)); ++} ++ ++/* ++ * Perform some basic diligence on the system parameter buffer before ++ * submitting it to RTAS. ++ */ ++static bool papr_sysparm_buf_can_submit(const struct papr_sysparm_buf *buf) ++{ ++ /* ++ * Firmware ought to reject buffer lengths that exceed the ++ * maximum specified in PAPR, but there's no reason for the ++ * kernel to allow them either. ++ */ ++ if (papr_sysparm_buf_get_length(buf) > sizeof(buf->val)) ++ return false; ++ ++ return true; ++} ++ + /** + * papr_sysparm_get() - Retrieve the value of a PAPR system parameter. + * @param: PAPR system parameter token as described in +@@ -63,6 +103,9 @@ int papr_sysparm_get(papr_sysparm_t param, struct papr_sysparm_buf *buf) + if (token == RTAS_UNKNOWN_SERVICE) + return -ENOENT; + ++ if (!papr_sysparm_buf_can_submit(buf)) ++ return -EINVAL; ++ + work_area = rtas_work_area_alloc(sizeof(*buf)); + + memcpy(rtas_work_area_raw_buf(work_area), buf, sizeof(*buf)); +@@ -77,6 +120,7 @@ int papr_sysparm_get(papr_sysparm_t param, struct papr_sysparm_buf *buf) + case 0: + ret = 0; + memcpy(buf, rtas_work_area_raw_buf(work_area), sizeof(*buf)); ++ papr_sysparm_buf_clamp_length(buf); + break; + case -3: /* parameter not implemented */ + ret = -EOPNOTSUPP; +@@ -115,6 +159,9 @@ int papr_sysparm_set(papr_sysparm_t param, const struct papr_sysparm_buf *buf) + if (token == RTAS_UNKNOWN_SERVICE) + return -ENOENT; + ++ if (!papr_sysparm_buf_can_submit(buf)) ++ return -EINVAL; ++ + work_area = rtas_work_area_alloc(sizeof(*buf)); + + memcpy(rtas_work_area_raw_buf(work_area), buf, sizeof(*buf)); +diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c +index 2dc9cbc4bcd8f..0c90fc4c37963 100644 +--- a/arch/powerpc/platforms/pseries/vio.c ++++ b/arch/powerpc/platforms/pseries/vio.c +@@ -611,7 +611,7 @@ static const struct dma_map_ops vio_dma_mapping_ops = { + .get_required_mask = dma_iommu_get_required_mask, + .mmap = dma_common_mmap, + .get_sgtable = dma_common_get_sgtable, +- .alloc_pages = dma_common_alloc_pages, ++ .alloc_pages_op = dma_common_alloc_pages, + .free_pages = dma_common_free_pages, + }; + +diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c +index f6ec6dba92dcb..700b67476a7d8 100644 +--- a/arch/powerpc/sysdev/xics/icp-native.c ++++ b/arch/powerpc/sysdev/xics/icp-native.c +@@ -236,6 +236,8 @@ static int __init icp_native_map_one_cpu(int hw_id, unsigned long addr, + rname = kasprintf(GFP_KERNEL, "CPU %d [0x%x] Interrupt Presentation", + cpu, hw_id); + ++ if (!rname) ++ return -ENOMEM; + if (!request_mem_region(addr, size, rname)) { + pr_warn("icp_native: Could not reserve ICP MMIO for CPU %d, interrupt server #0x%x\n", + cpu, hw_id); +diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h +index bfb4c26f113c4..b5b84c6be01e1 100644 +--- a/arch/riscv/include/asm/asm.h ++++ b/arch/riscv/include/asm/asm.h +@@ -164,6 +164,16 @@ + REG_L x31, PT_T6(sp) + .endm + ++/* 
Annotate a function as being unsuitable for kprobes. */ ++#ifdef CONFIG_KPROBES ++#define ASM_NOKPROBE(name) \ ++ .pushsection "_kprobe_blacklist", "aw"; \ ++ RISCV_PTR name; \ ++ .popsection ++#else ++#define ASM_NOKPROBE(name) ++#endif ++ + #endif /* __ASSEMBLY__ */ + + #endif /* _ASM_RISCV_ASM_H */ +diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S +index 278d01d2911fd..ed7baf2cf7e87 100644 +--- a/arch/riscv/kernel/entry.S ++++ b/arch/riscv/kernel/entry.S +@@ -105,6 +105,7 @@ _save_context: + 1: + tail do_trap_unknown + SYM_CODE_END(handle_exception) ++ASM_NOKPROBE(handle_exception) + + /* + * The ret_from_exception must be called with interrupt disabled. Here is the +@@ -171,6 +172,7 @@ SYM_CODE_START_NOALIGN(ret_from_exception) + sret + #endif + SYM_CODE_END(ret_from_exception) ++ASM_NOKPROBE(ret_from_exception) + + #ifdef CONFIG_VMAP_STACK + SYM_CODE_START_LOCAL(handle_kernel_stack_overflow) +@@ -206,6 +208,7 @@ SYM_CODE_START_LOCAL(handle_kernel_stack_overflow) + move a0, sp + tail handle_bad_stack + SYM_CODE_END(handle_kernel_stack_overflow) ++ASM_NOKPROBE(handle_kernel_stack_overflow) + #endif + + SYM_CODE_START(ret_from_fork) +diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c +index 67d0073fb624d..2158b7a65d74f 100644 +--- a/arch/riscv/kernel/traps.c ++++ b/arch/riscv/kernel/traps.c +@@ -311,6 +311,7 @@ asmlinkage __visible __trap_section void do_trap_ecall_u(struct pt_regs *regs) + + regs->epc += 4; + regs->orig_a0 = regs->a0; ++ regs->a0 = -ENOSYS; + + riscv_v_vstate_discard(regs); + +@@ -318,8 +319,6 @@ asmlinkage __visible __trap_section void do_trap_ecall_u(struct pt_regs *regs) + + if (syscall >= 0 && syscall < NR_syscalls) + syscall_handler(regs, syscall); +- else if (syscall != -1) +- regs->a0 = -ENOSYS; + + syscall_exit_to_user_mode(regs); + } else { +diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c +index 9b10e9655df8c..abe7a7a7686c1 100644 +--- a/arch/riscv/mm/init.c ++++ b/arch/riscv/mm/init.c +@@ -912,7 +912,7 @@ static void __init create_kernel_page_table(pgd_t *pgdir, + PMD_SIZE, PAGE_KERNEL_EXEC); + + /* Map the data in RAM */ +- end_va = kernel_map.virt_addr + XIP_OFFSET + kernel_map.size; ++ end_va = kernel_map.virt_addr + kernel_map.size; + for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += PMD_SIZE) + create_pgd_mapping(pgdir, va, + kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)), +@@ -1081,7 +1081,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) + + phys_ram_base = CONFIG_PHYS_RAM_BASE; + kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE; +- kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_sdata); ++ kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_start); + + kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom; + #else +diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h +index 0e7bd3873907f..b2e2f9a4163c5 100644 +--- a/arch/s390/include/asm/uv.h ++++ b/arch/s390/include/asm/uv.h +@@ -442,7 +442,10 @@ static inline int share(unsigned long addr, u16 cmd) + + if (!uv_call(0, (u64)&uvcb)) + return 0; +- return -EINVAL; ++ pr_err("%s UVC failed (rc: 0x%x, rrc: 0x%x), possible hypervisor bug.\n", ++ uvcb.header.cmd == UVC_CMD_SET_SHARED_ACCESS ? 
"Share" : "Unshare", ++ uvcb.header.rc, uvcb.header.rrc); ++ panic("System security cannot be guaranteed unless the system panics now.\n"); + } + + /* +diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c +index 442ce0489e1a1..3a54733e4fc65 100644 +--- a/arch/s390/kernel/early.c ++++ b/arch/s390/kernel/early.c +@@ -258,15 +258,9 @@ static inline void save_vector_registers(void) + #endif + } + +-static inline void setup_control_registers(void) ++static inline void setup_low_address_protection(void) + { +- unsigned long reg; +- +- __ctl_store(reg, 0, 0); +- reg |= CR0_LOW_ADDRESS_PROTECTION; +- reg |= CR0_EMERGENCY_SIGNAL_SUBMASK; +- reg |= CR0_EXTERNAL_CALL_SUBMASK; +- __ctl_load(reg, 0, 0); ++ __ctl_set_bit(0, 28); + } + + static inline void setup_access_registers(void) +@@ -314,7 +308,7 @@ void __init startup_init(void) + save_vector_registers(); + setup_topology(); + sclp_early_detect(); +- setup_control_registers(); ++ setup_low_address_protection(); + setup_access_registers(); + lockdep_on(); + } +diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c +index a4edb7ea66ea7..c63be2efd6895 100644 +--- a/arch/s390/kernel/smp.c ++++ b/arch/s390/kernel/smp.c +@@ -1013,12 +1013,12 @@ void __init smp_fill_possible_mask(void) + + void __init smp_prepare_cpus(unsigned int max_cpus) + { +- /* request the 0x1201 emergency signal external interrupt */ + if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt)) + panic("Couldn't request external interrupt 0x1201"); +- /* request the 0x1202 external call external interrupt */ ++ ctl_set_bit(0, 14); + if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt)) + panic("Couldn't request external interrupt 0x1202"); ++ ctl_set_bit(0, 13); + } + + void __init smp_prepare_boot_cpu(void) +diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h +index a8a6246835831..0c0f47ec63447 100644 +--- a/arch/s390/kvm/kvm-s390.h ++++ b/arch/s390/kvm/kvm-s390.h +@@ -249,7 +249,12 @@ static inline unsigned long kvm_s390_get_gfn_end(struct kvm_memslots *slots) + + static inline u32 kvm_s390_get_gisa_desc(struct kvm *kvm) + { +- u32 gd = virt_to_phys(kvm->arch.gisa_int.origin); ++ u32 gd; ++ ++ if (!kvm->arch.gisa_int.origin) ++ return 0; ++ ++ gd = virt_to_phys(kvm->arch.gisa_int.origin); + + if (gd && sclp.has_gisaf) + gd |= GISA_FORMAT1; +diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c +index 56a917df410d3..842a0ec5eaa9e 100644 +--- a/arch/x86/kernel/amd_gart_64.c ++++ b/arch/x86/kernel/amd_gart_64.c +@@ -676,7 +676,7 @@ static const struct dma_map_ops gart_dma_ops = { + .get_sgtable = dma_common_get_sgtable, + .dma_supported = dma_direct_supported, + .get_required_mask = dma_direct_get_required_mask, +- .alloc_pages = dma_direct_alloc_pages, ++ .alloc_pages_op = dma_direct_alloc_pages, + .free_pages = dma_direct_free_pages, + }; + +diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c +index b6f4e8399fca2..5351f293f770b 100644 +--- a/arch/x86/kernel/process.c ++++ b/arch/x86/kernel/process.c +@@ -1030,7 +1030,10 @@ unsigned long arch_align_stack(unsigned long sp) + + unsigned long arch_randomize_brk(struct mm_struct *mm) + { +- return randomize_page(mm->brk, 0x02000000); ++ if (mmap_is_ia32()) ++ return randomize_page(mm->brk, SZ_32M); ++ ++ return randomize_page(mm->brk, SZ_1G); + } + + /* +diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c +index cc57e2dd9a0bb..2cafcf11ee8be 100644 +--- a/block/blk-mq-tag.c ++++ b/block/blk-mq-tag.c +@@ -38,6 +38,7 @@ static void 
blk_mq_update_wake_batch(struct blk_mq_tags *tags, + void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) + { + unsigned int users; ++ unsigned long flags; + struct blk_mq_tags *tags = hctx->tags; + + /* +@@ -56,11 +57,11 @@ void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) + return; + } + +- spin_lock_irq(&tags->lock); ++ spin_lock_irqsave(&tags->lock, flags); + users = tags->active_queues + 1; + WRITE_ONCE(tags->active_queues, users); + blk_mq_update_wake_batch(tags, users); +- spin_unlock_irq(&tags->lock); ++ spin_unlock_irqrestore(&tags->lock, flags); + } + + /* +diff --git a/drivers/accel/habanalabs/common/debugfs.c b/drivers/accel/habanalabs/common/debugfs.c +index 9e84a47a21dcf..7d733e4d50611 100644 +--- a/drivers/accel/habanalabs/common/debugfs.c ++++ b/drivers/accel/habanalabs/common/debugfs.c +@@ -1645,19 +1645,19 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent + &hl_data64b_fops); + + debugfs_create_file("set_power_state", +- 0200, ++ 0644, + root, + dev_entry, + &hl_power_fops); + + debugfs_create_file("device", +- 0200, ++ 0644, + root, + dev_entry, + &hl_device_fops); + + debugfs_create_file("clk_gate", +- 0200, ++ 0644, + root, + dev_entry, + &hl_clk_gate_fops); +@@ -1669,13 +1669,13 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent + &hl_stop_on_err_fops); + + debugfs_create_file("dump_security_violations", +- 0644, ++ 0400, + root, + dev_entry, + &hl_security_violations_fops); + + debugfs_create_file("dump_razwi_events", +- 0644, ++ 0400, + root, + dev_entry, + &hl_razwi_check_fops); +@@ -1708,7 +1708,7 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent + &hdev->reset_info.skip_reset_on_timeout); + + debugfs_create_file("state_dump", +- 0600, ++ 0644, + root, + dev_entry, + &hl_state_dump_fops); +@@ -1726,7 +1726,7 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent + + for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) { + debugfs_create_file(hl_debugfs_list[i].name, +- 0444, ++ 0644, + root, + entry, + &hl_debugfs_fops); +diff --git a/drivers/accel/habanalabs/common/irq.c b/drivers/accel/habanalabs/common/irq.c +index b1010d206c2ef..813315cea4a7b 100644 +--- a/drivers/accel/habanalabs/common/irq.c ++++ b/drivers/accel/habanalabs/common/irq.c +@@ -271,6 +271,9 @@ static int handle_registration_node(struct hl_device *hdev, struct hl_user_pendi + free_node->cq_cb = pend->ts_reg_info.cq_cb; + list_add(&free_node->free_objects_node, *free_list); + ++ /* Mark TS record as free */ ++ pend->ts_reg_info.in_use = false; ++ + return 0; + } + +diff --git a/drivers/accel/habanalabs/common/memory.c b/drivers/accel/habanalabs/common/memory.c +index 4fc72a07d2f59..5b7d9a351133f 100644 +--- a/drivers/accel/habanalabs/common/memory.c ++++ b/drivers/accel/habanalabs/common/memory.c +@@ -1878,16 +1878,16 @@ static int export_dmabuf(struct hl_ctx *ctx, + + static int validate_export_params_common(struct hl_device *hdev, u64 device_addr, u64 size) + { +- if (!IS_ALIGNED(device_addr, PAGE_SIZE)) { ++ if (!PAGE_ALIGNED(device_addr)) { + dev_dbg(hdev->dev, +- "exported device memory address 0x%llx should be aligned to 0x%lx\n", ++ "exported device memory address 0x%llx should be aligned to PAGE_SIZE 0x%lx\n", + device_addr, PAGE_SIZE); + return -EINVAL; + } + +- if (size < PAGE_SIZE) { ++ if (!size || !PAGE_ALIGNED(size)) { + dev_dbg(hdev->dev, +- "exported device memory size %llu should be equal to or greater than %lu\n", ++ "exported device 
memory size %llu should be a multiple of PAGE_SIZE %lu\n", + size, PAGE_SIZE); + return -EINVAL; + } +@@ -1938,6 +1938,13 @@ static int validate_export_params(struct hl_device *hdev, u64 device_addr, u64 s + if (rc) + return rc; + ++ if (!PAGE_ALIGNED(offset)) { ++ dev_dbg(hdev->dev, ++ "exported device memory offset %llu should be a multiple of PAGE_SIZE %lu\n", ++ offset, PAGE_SIZE); ++ return -EINVAL; ++ } ++ + if ((offset + size) > phys_pg_pack->total_size) { + dev_dbg(hdev->dev, "offset %#llx and size %#llx exceed total map size %#llx\n", + offset, size, phys_pg_pack->total_size); +diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2_security.c b/drivers/accel/habanalabs/gaudi2/gaudi2_security.c +index 2742b1f801eb2..908710524dc9e 100644 +--- a/drivers/accel/habanalabs/gaudi2/gaudi2_security.c ++++ b/drivers/accel/habanalabs/gaudi2/gaudi2_security.c +@@ -1601,6 +1601,7 @@ static const u32 gaudi2_pb_dcr0_tpc0_unsecured_regs[] = { + mmDCORE0_TPC0_CFG_KERNEL_SRF_30, + mmDCORE0_TPC0_CFG_KERNEL_SRF_31, + mmDCORE0_TPC0_CFG_TPC_SB_L0CD, ++ mmDCORE0_TPC0_CFG_TPC_COUNT, + mmDCORE0_TPC0_CFG_TPC_ID, + mmDCORE0_TPC0_CFG_QM_KERNEL_ID_INC, + mmDCORE0_TPC0_CFG_QM_TID_BASE_SIZE_HIGH_DIM_0, +diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h +index 2133085deda77..1c5218b79fc2a 100644 +--- a/drivers/acpi/acpica/acevents.h ++++ b/drivers/acpi/acpica/acevents.h +@@ -188,13 +188,9 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj, + u8 acpi_ns_is_locked); + + void +-acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, ++acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, u32 max_depth, + acpi_adr_space_type space_id, u32 function); + +-void +-acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *node, +- acpi_adr_space_type space_id); +- + acpi_status + acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function); + +diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c +index dc6004daf624b..cf53b9535f18e 100644 +--- a/drivers/acpi/acpica/evregion.c ++++ b/drivers/acpi/acpica/evregion.c +@@ -20,6 +20,10 @@ extern u8 acpi_gbl_default_address_spaces[]; + + /* Local prototypes */ + ++static void ++acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node, ++ acpi_adr_space_type space_id); ++ + static acpi_status + acpi_ev_reg_run(acpi_handle obj_handle, + u32 level, void *context, void **return_value); +@@ -61,6 +65,7 @@ acpi_status acpi_ev_initialize_op_regions(void) + acpi_gbl_default_address_spaces + [i])) { + acpi_ev_execute_reg_methods(acpi_gbl_root_node, ++ ACPI_UINT32_MAX, + acpi_gbl_default_address_spaces + [i], ACPI_REG_CONNECT); + } +@@ -668,6 +673,7 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function) + * FUNCTION: acpi_ev_execute_reg_methods + * + * PARAMETERS: node - Namespace node for the device ++ * max_depth - Depth to which search for _REG + * space_id - The address space ID + * function - Passed to _REG: On (1) or Off (0) + * +@@ -679,7 +685,7 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function) + ******************************************************************************/ + + void +-acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, ++acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, u32 max_depth, + acpi_adr_space_type space_id, u32 function) + { + struct acpi_reg_walk_info info; +@@ -713,7 +719,7 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, + * regions and _REG 
methods. (i.e. handlers must be installed for all + * regions of this Space ID before we can run any _REG methods) + */ +- (void)acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX, ++ (void)acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, max_depth, + ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run, NULL, + &info, NULL); + +@@ -814,7 +820,7 @@ acpi_ev_reg_run(acpi_handle obj_handle, + * + ******************************************************************************/ + +-void ++static void + acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node, + acpi_adr_space_type space_id) + { +diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c +index 624361a5f34d8..95f78383bbdba 100644 +--- a/drivers/acpi/acpica/evxfregn.c ++++ b/drivers/acpi/acpica/evxfregn.c +@@ -85,7 +85,8 @@ acpi_install_address_space_handler_internal(acpi_handle device, + /* Run all _REG methods for this address space */ + + if (run_reg) { +- acpi_ev_execute_reg_methods(node, space_id, ACPI_REG_CONNECT); ++ acpi_ev_execute_reg_methods(node, ACPI_UINT32_MAX, space_id, ++ ACPI_REG_CONNECT); + } + + unlock_and_exit: +@@ -263,6 +264,7 @@ ACPI_EXPORT_SYMBOL(acpi_remove_address_space_handler) + * FUNCTION: acpi_execute_reg_methods + * + * PARAMETERS: device - Handle for the device ++ * max_depth - Depth to which search for _REG + * space_id - The address space ID + * + * RETURN: Status +@@ -271,7 +273,8 @@ ACPI_EXPORT_SYMBOL(acpi_remove_address_space_handler) + * + ******************************************************************************/ + acpi_status +-acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id) ++acpi_execute_reg_methods(acpi_handle device, u32 max_depth, ++ acpi_adr_space_type space_id) + { + struct acpi_namespace_node *node; + acpi_status status; +@@ -296,7 +299,8 @@ acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id) + + /* Run all _REG methods for this address space */ + +- acpi_ev_execute_reg_methods(node, space_id, ACPI_REG_CONNECT); ++ acpi_ev_execute_reg_methods(node, max_depth, space_id, ++ ACPI_REG_CONNECT); + } else { + status = AE_BAD_PARAMETER; + } +@@ -306,57 +310,3 @@ acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id) + } + + ACPI_EXPORT_SYMBOL(acpi_execute_reg_methods) +- +-/******************************************************************************* +- * +- * FUNCTION: acpi_execute_orphan_reg_method +- * +- * PARAMETERS: device - Handle for the device +- * space_id - The address space ID +- * +- * RETURN: Status +- * +- * DESCRIPTION: Execute an "orphan" _REG method that appears under an ACPI +- * device. This is a _REG method that has no corresponding region +- * within the device's scope. +- * +- ******************************************************************************/ +-acpi_status +-acpi_execute_orphan_reg_method(acpi_handle device, acpi_adr_space_type space_id) +-{ +- struct acpi_namespace_node *node; +- acpi_status status; +- +- ACPI_FUNCTION_TRACE(acpi_execute_orphan_reg_method); +- +- /* Parameter validation */ +- +- if (!device) { +- return_ACPI_STATUS(AE_BAD_PARAMETER); +- } +- +- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); +- if (ACPI_FAILURE(status)) { +- return_ACPI_STATUS(status); +- } +- +- /* Convert and validate the device handle */ +- +- node = acpi_ns_validate_handle(device); +- if (node) { +- +- /* +- * If an "orphan" _REG method is present in the device's scope +- * for the given address space ID, run it. 
+- */ +- +- acpi_ev_execute_orphan_reg_method(node, space_id); +- } else { +- status = AE_BAD_PARAMETER; +- } +- +- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); +- return_ACPI_STATUS(status); +-} +- +-ACPI_EXPORT_SYMBOL(acpi_execute_orphan_reg_method) +diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c +index 4e3a4d96797e0..35e22a2af4e4b 100644 +--- a/drivers/acpi/ec.c ++++ b/drivers/acpi/ec.c +@@ -1487,12 +1487,13 @@ static bool install_gpio_irq_event_handler(struct acpi_ec *ec) + static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device, + bool call_reg) + { +- acpi_handle scope_handle = ec == first_ec ? ACPI_ROOT_OBJECT : ec->handle; + acpi_status status; + + acpi_ec_start(ec, false); + + if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) { ++ acpi_handle scope_handle = ec == first_ec ? ACPI_ROOT_OBJECT : ec->handle; ++ + acpi_ec_enter_noirq(ec); + status = acpi_install_address_space_handler_no_reg(scope_handle, + ACPI_ADR_SPACE_EC, +@@ -1506,10 +1507,7 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device, + } + + if (call_reg && !test_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags)) { +- acpi_execute_reg_methods(scope_handle, ACPI_ADR_SPACE_EC); +- if (scope_handle != ec->handle) +- acpi_execute_orphan_reg_method(ec->handle, ACPI_ADR_SPACE_EC); +- ++ acpi_execute_reg_methods(ec->handle, ACPI_UINT32_MAX, ACPI_ADR_SPACE_EC); + set_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags); + } + +@@ -1724,6 +1722,12 @@ static void acpi_ec_remove(struct acpi_device *device) + } + } + ++void acpi_ec_register_opregions(struct acpi_device *adev) ++{ ++ if (first_ec && first_ec->handle != adev->handle) ++ acpi_execute_reg_methods(adev->handle, 1, ACPI_ADR_SPACE_EC); ++} ++ + static acpi_status + ec_parse_io_ports(struct acpi_resource *resource, void *context) + { +diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h +index 6db1a03dd5399..1e8ee97fc85f3 100644 +--- a/drivers/acpi/internal.h ++++ b/drivers/acpi/internal.h +@@ -204,6 +204,7 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, + acpi_handle handle, acpi_ec_query_func func, + void *data); + void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit); ++void acpi_ec_register_opregions(struct acpi_device *adev); + + #ifdef CONFIG_PM_SLEEP + void acpi_ec_flush_work(void); +diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c +index 5c4e353448f52..c0c5c5c58ae1e 100644 +--- a/drivers/acpi/scan.c ++++ b/drivers/acpi/scan.c +@@ -2198,6 +2198,8 @@ static int acpi_bus_attach(struct acpi_device *device, void *first_pass) + if (device->handler) + goto ok; + ++ acpi_ec_register_opregions(device); ++ + if (!device->flags.initialized) { + device->flags.power_manageable = + device->power.states[ACPI_STATE_D0].flags.valid; +diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c +index e7f713cd70d3f..a876024d8a05f 100644 +--- a/drivers/atm/idt77252.c ++++ b/drivers/atm/idt77252.c +@@ -1118,8 +1118,8 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) + rpp->len += skb->len; + + if (stat & SAR_RSQE_EPDU) { ++ unsigned int len, truesize; + unsigned char *l1l2; +- unsigned int len; + + l1l2 = (unsigned char *) ((unsigned long) skb->data + skb->len - 6); + +@@ -1189,14 +1189,15 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) + ATM_SKB(skb)->vcc = vcc; + __net_timestamp(skb); + ++ truesize = skb->truesize; + vcc->push(vcc, skb); + atomic_inc(&vcc->stats->rx); + +- if (skb->truesize > SAR_FB_SIZE_3) ++ if (truesize > SAR_FB_SIZE_3) + add_rx_skb(card, 3, 
SAR_FB_SIZE_3, 1); +- else if (skb->truesize > SAR_FB_SIZE_2) ++ else if (truesize > SAR_FB_SIZE_2) + add_rx_skb(card, 2, SAR_FB_SIZE_2, 1); +- else if (skb->truesize > SAR_FB_SIZE_1) ++ else if (truesize > SAR_FB_SIZE_1) + add_rx_skb(card, 1, SAR_FB_SIZE_1, 1); + else + add_rx_skb(card, 0, SAR_FB_SIZE_0, 1); +diff --git a/drivers/char/xillybus/xillyusb.c b/drivers/char/xillybus/xillyusb.c +index 5a5afa14ca8cb..45771b1a3716a 100644 +--- a/drivers/char/xillybus/xillyusb.c ++++ b/drivers/char/xillybus/xillyusb.c +@@ -50,6 +50,7 @@ MODULE_LICENSE("GPL v2"); + static const char xillyname[] = "xillyusb"; + + static unsigned int fifo_buf_order; ++static struct workqueue_struct *wakeup_wq; + + #define USB_VENDOR_ID_XILINX 0x03fd + #define USB_VENDOR_ID_ALTERA 0x09fb +@@ -569,10 +570,6 @@ static void cleanup_dev(struct kref *kref) + * errors if executed. The mechanism relies on that xdev->error is assigned + * a non-zero value by report_io_error() prior to queueing wakeup_all(), + * which prevents bulk_in_work() from calling process_bulk_in(). +- * +- * The fact that wakeup_all() and bulk_in_work() are queued on the same +- * workqueue makes their concurrent execution very unlikely, however the +- * kernel's API doesn't seem to ensure this strictly. + */ + + static void wakeup_all(struct work_struct *work) +@@ -627,7 +624,7 @@ static void report_io_error(struct xillyusb_dev *xdev, + + if (do_once) { + kref_get(&xdev->kref); /* xdev is used by work item */ +- queue_work(xdev->workq, &xdev->wakeup_workitem); ++ queue_work(wakeup_wq, &xdev->wakeup_workitem); + } + } + +@@ -1906,6 +1903,13 @@ static const struct file_operations xillyusb_fops = { + + static int xillyusb_setup_base_eps(struct xillyusb_dev *xdev) + { ++ struct usb_device *udev = xdev->udev; ++ ++ /* Verify that device has the two fundamental bulk in/out endpoints */ ++ if (usb_pipe_type_check(udev, usb_sndbulkpipe(udev, MSG_EP_NUM)) || ++ usb_pipe_type_check(udev, usb_rcvbulkpipe(udev, IN_EP_NUM))) ++ return -ENODEV; ++ + xdev->msg_ep = endpoint_alloc(xdev, MSG_EP_NUM | USB_DIR_OUT, + bulk_out_work, 1, 2); + if (!xdev->msg_ep) +@@ -1935,14 +1939,15 @@ static int setup_channels(struct xillyusb_dev *xdev, + __le16 *chandesc, + int num_channels) + { +- struct xillyusb_channel *chan; ++ struct usb_device *udev = xdev->udev; ++ struct xillyusb_channel *chan, *new_channels; + int i; + + chan = kcalloc(num_channels, sizeof(*chan), GFP_KERNEL); + if (!chan) + return -ENOMEM; + +- xdev->channels = chan; ++ new_channels = chan; + + for (i = 0; i < num_channels; i++, chan++) { + unsigned int in_desc = le16_to_cpu(*chandesc++); +@@ -1971,6 +1976,15 @@ static int setup_channels(struct xillyusb_dev *xdev, + */ + + if ((out_desc & 0x80) && i < 14) { /* Entry is valid */ ++ if (usb_pipe_type_check(udev, ++ usb_sndbulkpipe(udev, i + 2))) { ++ dev_err(xdev->dev, ++ "Missing BULK OUT endpoint %d\n", ++ i + 2); ++ kfree(new_channels); ++ return -ENODEV; ++ } ++ + chan->writable = 1; + chan->out_synchronous = !!(out_desc & 0x40); + chan->out_seekable = !!(out_desc & 0x20); +@@ -1980,6 +1994,7 @@ static int setup_channels(struct xillyusb_dev *xdev, + } + } + ++ xdev->channels = new_channels; + return 0; + } + +@@ -2096,9 +2111,11 @@ static int xillyusb_discovery(struct usb_interface *interface) + * just after responding with the IDT, there is no reason for any + * work item to be running now. To be sure that xdev->channels + * is updated on anything that might run in parallel, flush the +- * workqueue, which rarely does anything. 
++ * device's workqueue and the wakeup work item. This rarely ++ * does anything. + */ + flush_workqueue(xdev->workq); ++ flush_work(&xdev->wakeup_workitem); + + xdev->num_channels = num_channels; + +@@ -2258,6 +2275,10 @@ static int __init xillyusb_init(void) + { + int rc = 0; + ++ wakeup_wq = alloc_workqueue(xillyname, 0, 0); ++ if (!wakeup_wq) ++ return -ENOMEM; ++ + if (LOG2_INITIAL_FIFO_BUF_SIZE > PAGE_SHIFT) + fifo_buf_order = LOG2_INITIAL_FIFO_BUF_SIZE - PAGE_SHIFT; + else +@@ -2265,12 +2286,17 @@ static int __init xillyusb_init(void) + + rc = usb_register(&xillyusb_driver); + ++ if (rc) ++ destroy_workqueue(wakeup_wq); ++ + return rc; + } + + static void __exit xillyusb_exit(void) + { + usb_deregister(&xillyusb_driver); ++ ++ destroy_workqueue(wakeup_wq); + } + + module_init(xillyusb_init); +diff --git a/drivers/clk/visconti/pll.c b/drivers/clk/visconti/pll.c +index 1f3234f226674..e9cd80e085dc3 100644 +--- a/drivers/clk/visconti/pll.c ++++ b/drivers/clk/visconti/pll.c +@@ -329,12 +329,12 @@ struct visconti_pll_provider * __init visconti_init_pll(struct device_node *np, + if (!ctx) + return ERR_PTR(-ENOMEM); + +- for (i = 0; i < nr_plls; ++i) +- ctx->clk_data.hws[i] = ERR_PTR(-ENOENT); +- + ctx->node = np; + ctx->reg_base = base; + ctx->clk_data.num = nr_plls; + ++ for (i = 0; i < nr_plls; ++i) ++ ctx->clk_data.hws[i] = ERR_PTR(-ENOENT); ++ + return ctx; + } +diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c +index e1c773bb55359..22a58d35a41fa 100644 +--- a/drivers/clocksource/arm_global_timer.c ++++ b/drivers/clocksource/arm_global_timer.c +@@ -290,18 +290,17 @@ static int gt_clk_rate_change_cb(struct notifier_block *nb, + switch (event) { + case PRE_RATE_CHANGE: + { +- int psv; ++ unsigned long psv; + +- psv = DIV_ROUND_CLOSEST(ndata->new_rate, +- gt_target_rate); +- +- if (abs(gt_target_rate - (ndata->new_rate / psv)) > MAX_F_ERR) ++ psv = DIV_ROUND_CLOSEST(ndata->new_rate, gt_target_rate); ++ if (!psv || ++ abs(gt_target_rate - (ndata->new_rate / psv)) > MAX_F_ERR) + return NOTIFY_BAD; + + psv--; + + /* prescaler within legal range? 
*/ +- if (psv < 0 || psv > GT_CONTROL_PRESCALER_MAX) ++ if (psv > GT_CONTROL_PRESCALER_MAX) + return NOTIFY_BAD; + + /* +diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c +index 03d7a74ca22dc..8d18099fd528c 100644 +--- a/drivers/edac/skx_common.c ++++ b/drivers/edac/skx_common.c +@@ -659,6 +659,10 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val, + memset(&res, 0, sizeof(res)); + res.mce = mce; + res.addr = mce->addr & MCI_ADDR_PHYSADDR; ++ if (!pfn_to_online_page(res.addr >> PAGE_SHIFT) && !arch_is_platform_page(res.addr)) { ++ pr_err("Invalid address 0x%llx in IA32_MC%d_ADDR\n", mce->addr, mce->bank); ++ return NOTIFY_DONE; ++ } + + /* Try driver decoder first */ + if (!(driver_decode && driver_decode(&res))) { +diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c +index bd1651e709365..a1da7581adb03 100644 +--- a/drivers/firmware/cirrus/cs_dsp.c ++++ b/drivers/firmware/cirrus/cs_dsp.c +@@ -522,7 +522,7 @@ void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp) + { + cs_dsp_debugfs_clear(dsp); + debugfs_remove_recursive(dsp->debugfs_root); +- dsp->debugfs_root = NULL; ++ dsp->debugfs_root = ERR_PTR(-ENODEV); + } + EXPORT_SYMBOL_NS_GPL(cs_dsp_cleanup_debugfs, FW_CS_DSP); + #else +@@ -2343,6 +2343,11 @@ static int cs_dsp_common_init(struct cs_dsp *dsp) + + mutex_init(&dsp->pwr_lock); + ++#ifdef CONFIG_DEBUG_FS ++ /* Ensure this is invalid if client never provides a debugfs root */ ++ dsp->debugfs_root = ERR_PTR(-ENODEV); ++#endif ++ + return 0; + } + +diff --git a/drivers/gpio/gpio-mlxbf3.c b/drivers/gpio/gpio-mlxbf3.c +index d5906d419b0ab..10ea71273c891 100644 +--- a/drivers/gpio/gpio-mlxbf3.c ++++ b/drivers/gpio/gpio-mlxbf3.c +@@ -39,6 +39,8 @@ + #define MLXBF_GPIO_CAUSE_OR_EVTEN0 0x14 + #define MLXBF_GPIO_CAUSE_OR_CLRCAUSE 0x18 + ++#define MLXBF_GPIO_CLR_ALL_INTS GENMASK(31, 0) ++ + struct mlxbf3_gpio_context { + struct gpio_chip gc; + +@@ -82,6 +84,8 @@ static void mlxbf3_gpio_irq_disable(struct irq_data *irqd) + val = readl(gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0); + val &= ~BIT(offset); + writel(val, gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0); ++ ++ writel(BIT(offset), gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_CLRCAUSE); + raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags); + + gpiochip_disable_irq(gc, offset); +@@ -253,6 +257,15 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev) + return 0; + } + ++static void mlxbf3_gpio_shutdown(struct platform_device *pdev) ++{ ++ struct mlxbf3_gpio_context *gs = platform_get_drvdata(pdev); ++ ++ /* Disable and clear all interrupts */ ++ writel(0, gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0); ++ writel(MLXBF_GPIO_CLR_ALL_INTS, gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_CLRCAUSE); ++} ++ + static const struct acpi_device_id mlxbf3_gpio_acpi_match[] = { + { "MLNXBF33", 0 }, + {} +@@ -265,6 +278,7 @@ static struct platform_driver mlxbf3_gpio_driver = { + .acpi_match_table = mlxbf3_gpio_acpi_match, + }, + .probe = mlxbf3_gpio_probe, ++ .shutdown = mlxbf3_gpio_shutdown, + }; + module_platform_driver(mlxbf3_gpio_driver); + +diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c +index 12d853845bb80..6c27312c62788 100644 +--- a/drivers/gpio/gpiolib-sysfs.c ++++ b/drivers/gpio/gpiolib-sysfs.c +@@ -1,6 +1,7 @@ + // SPDX-License-Identifier: GPL-2.0 + + #include ++#include + #include + #include + #include +@@ -774,15 +775,15 @@ void gpiochip_sysfs_unregister(struct gpio_device *gdev) + struct gpio_desc *desc; + struct gpio_chip *chip = gdev->chip; + +- if 
(!gdev->mockdev) +- return; ++ scoped_guard(mutex, &sysfs_lock) { ++ if (!gdev->mockdev) ++ return; + +- device_unregister(gdev->mockdev); ++ device_unregister(gdev->mockdev); + +- /* prevent further gpiod exports */ +- mutex_lock(&sysfs_lock); +- gdev->mockdev = NULL; +- mutex_unlock(&sysfs_lock); ++ /* prevent further gpiod exports */ ++ gdev->mockdev = NULL; ++ } + + /* unregister gpiod class devices owned by sysfs */ + for_each_gpio_desc_with_flag(chip, desc, FLAG_SYSFS) { +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +index 2fe9860725bd9..db5b1c6beba75 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +@@ -303,6 +303,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev, + struct kgd_mem *mem, void *drm_priv); + int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( + struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv); ++int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv); + int amdgpu_amdkfd_gpuvm_sync_memory( + struct amdgpu_device *adev, struct kgd_mem *mem, bool intr); + int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem, +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +index 62c1dc9510a41..9d72bb0a0eaec 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +@@ -733,7 +733,7 @@ kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem, + enum dma_data_direction dir; + + if (unlikely(!ttm->sg)) { +- pr_err("SG Table of BO is UNEXPECTEDLY NULL"); ++ pr_debug("SG Table of BO is NULL"); + return; + } + +@@ -1202,8 +1202,6 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem, + amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update); + + amdgpu_sync_fence(sync, bo_va->last_pt_update); +- +- kfd_mem_dmaunmap_attachment(mem, entry); + } + + static int update_gpuvm_pte(struct kgd_mem *mem, +@@ -1258,6 +1256,7 @@ static int map_bo_to_gpuvm(struct kgd_mem *mem, + + update_gpuvm_pte_failed: + unmap_bo_from_gpuvm(mem, entry, sync); ++ kfd_mem_dmaunmap_attachment(mem, entry); + return ret; + } + +@@ -1862,8 +1861,10 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( + mem->va + bo_size * (1 + mem->aql_queue)); + + /* Remove from VM internal data structures */ +- list_for_each_entry_safe(entry, tmp, &mem->attachments, list) ++ list_for_each_entry_safe(entry, tmp, &mem->attachments, list) { ++ kfd_mem_dmaunmap_attachment(mem, entry); + kfd_mem_detach(entry); ++ } + + ret = unreserve_bo_and_vms(&ctx, false, false); + +@@ -2037,6 +2038,37 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( + return ret; + } + ++int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv) ++{ ++ struct kfd_mem_attachment *entry; ++ struct amdgpu_vm *vm; ++ int ret; ++ ++ vm = drm_priv_to_vm(drm_priv); ++ ++ mutex_lock(&mem->lock); ++ ++ ret = amdgpu_bo_reserve(mem->bo, true); ++ if (ret) ++ goto out; ++ ++ list_for_each_entry(entry, &mem->attachments, list) { ++ if (entry->bo_va->base.vm != vm) ++ continue; ++ if (entry->bo_va->base.bo->tbo.ttm && ++ !entry->bo_va->base.bo->tbo.ttm->sg) ++ continue; ++ ++ kfd_mem_dmaunmap_attachment(mem, entry); ++ } ++ ++ amdgpu_bo_unreserve(mem->bo); ++out: ++ mutex_unlock(&mem->lock); ++ ++ return ret; ++} ++ + int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( + struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv) + { +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +index 4294f5e7bff9a..61668a784315f 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +@@ -1057,6 +1057,9 @@ static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p, + r = amdgpu_ring_parse_cs(ring, p, job, ib); + if (r) + return r; ++ ++ if (ib->sa_bo) ++ ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo); + } else { + ib->ptr = (uint32_t *)kptr; + r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib); +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +index 76549c2cffebe..cf8804fa7e971 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +@@ -684,16 +684,24 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, + + switch (args->in.op) { + case AMDGPU_CTX_OP_ALLOC_CTX: ++ if (args->in.flags) ++ return -EINVAL; + r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id); + args->out.alloc.ctx_id = id; + break; + case AMDGPU_CTX_OP_FREE_CTX: ++ if (args->in.flags) ++ return -EINVAL; + r = amdgpu_ctx_free(fpriv, id); + break; + case AMDGPU_CTX_OP_QUERY_STATE: ++ if (args->in.flags) ++ return -EINVAL; + r = amdgpu_ctx_query(adev, fpriv, id, &args->out); + break; + case AMDGPU_CTX_OP_QUERY_STATE2: ++ if (args->in.flags) ++ return -EINVAL; + r = amdgpu_ctx_query2(adev, fpriv, id, &args->out); + break; + case AMDGPU_CTX_OP_GET_STABLE_PSTATE: +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c +index 8e8afbd237bcd..9aff579c6abf5 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c +@@ -166,6 +166,9 @@ static ssize_t ta_if_load_debugfs_write(struct file *fp, const char *buf, size_t + if (ret) + return -EFAULT; + ++ if (ta_bin_len > PSP_1_MEG) ++ return -EINVAL; ++ + copy_pos += sizeof(uint32_t); + + ta_bin = kzalloc(ta_bin_len, GFP_KERNEL); +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +index 03b4bcfca1963..111350ef1b742 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +@@ -135,6 +135,10 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) + } + } + ++ /* from vcn4 and above, only unified queue is used */ ++ adev->vcn.using_unified_queue = ++ adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0); ++ + hdr = (const struct common_firmware_header *)adev->vcn.fw->data; + adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version); + +@@ -259,18 +263,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev) + return 0; + } + +-/* from vcn4 and above, only unified queue is used */ +-static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring) +-{ +- struct amdgpu_device *adev = ring->adev; +- bool ret = false; +- +- if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0)) +- ret = true; +- +- return ret; +-} +- + bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance) + { + bool ret = false; +@@ -380,7 +372,9 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) + for (i = 0; i < adev->vcn.num_enc_rings; ++i) + fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]); + +- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { ++ /* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */ ++ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG && ++ !adev->vcn.using_unified_queue) { + struct dpg_pause_state new_state; + + if 
(fence[j] || +@@ -426,7 +420,9 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring) + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, + AMD_PG_STATE_UNGATE); + +- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { ++ /* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */ ++ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG && ++ !adev->vcn.using_unified_queue) { + struct dpg_pause_state new_state; + + if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) { +@@ -452,8 +448,12 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring) + + void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring) + { ++ struct amdgpu_device *adev = ring->adev; ++ ++ /* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */ + if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG && +- ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) ++ ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC && ++ !adev->vcn.using_unified_queue) + atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt); + + atomic_dec(&ring->adev->vcn.total_submission_cnt); +@@ -707,12 +707,11 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring, + struct amdgpu_job *job; + struct amdgpu_ib *ib; + uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr); +- bool sq = amdgpu_vcn_using_unified_queue(ring); + uint32_t *ib_checksum; + uint32_t ib_pack_in_dw; + int i, r; + +- if (sq) ++ if (adev->vcn.using_unified_queue) + ib_size_dw += 8; + + r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, +@@ -725,7 +724,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring, + ib->length_dw = 0; + + /* single queue headers */ +- if (sq) { ++ if (adev->vcn.using_unified_queue) { + ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t) + + 4 + 2; /* engine info + decoding ib in dw */ + ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false); +@@ -744,7 +743,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring, + for (i = ib->length_dw; i < ib_size_dw; ++i) + ib->ptr[i] = 0x0; + +- if (sq) ++ if (adev->vcn.using_unified_queue) + amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw); + + r = amdgpu_job_submit_direct(job, ring, &f); +@@ -834,15 +833,15 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand + struct dma_fence **fence) + { + unsigned int ib_size_dw = 16; ++ struct amdgpu_device *adev = ring->adev; + struct amdgpu_job *job; + struct amdgpu_ib *ib; + struct dma_fence *f = NULL; + uint32_t *ib_checksum = NULL; + uint64_t addr; +- bool sq = amdgpu_vcn_using_unified_queue(ring); + int i, r; + +- if (sq) ++ if (adev->vcn.using_unified_queue) + ib_size_dw += 8; + + r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, +@@ -856,7 +855,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand + + ib->length_dw = 0; + +- if (sq) ++ if (adev->vcn.using_unified_queue) + ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true); + + ib->ptr[ib->length_dw++] = 0x00000018; +@@ -878,7 +877,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand + for (i = ib->length_dw; i < ib_size_dw; ++i) + ib->ptr[i] = 0x0; + +- if (sq) ++ if (adev->vcn.using_unified_queue) + amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11); + + r = amdgpu_job_submit_direct(job, ring, &f); +@@ -901,15 +900,15 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han + struct dma_fence **fence) + { + unsigned int ib_size_dw = 16; 
++ struct amdgpu_device *adev = ring->adev; + struct amdgpu_job *job; + struct amdgpu_ib *ib; + struct dma_fence *f = NULL; + uint32_t *ib_checksum = NULL; + uint64_t addr; +- bool sq = amdgpu_vcn_using_unified_queue(ring); + int i, r; + +- if (sq) ++ if (adev->vcn.using_unified_queue) + ib_size_dw += 8; + + r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, +@@ -923,7 +922,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han + + ib->length_dw = 0; + +- if (sq) ++ if (adev->vcn.using_unified_queue) + ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true); + + ib->ptr[ib->length_dw++] = 0x00000018; +@@ -945,7 +944,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han + for (i = ib->length_dw; i < ib_size_dw; ++i) + ib->ptr[i] = 0x0; + +- if (sq) ++ if (adev->vcn.using_unified_queue) + amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11); + + r = amdgpu_job_submit_direct(job, ring, &f); +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +index a3eed90b6af09..3dc2cffdae4fc 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +@@ -284,6 +284,7 @@ struct amdgpu_vcn { + + uint16_t inst_mask; + uint8_t num_inst_per_aid; ++ bool using_unified_queue; + }; + + struct amdgpu_fw_shared_rb_ptrs_struct { +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +index 0d51222f6f8eb..026a3db947298 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +@@ -766,11 +766,15 @@ int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params, + struct amdgpu_vm_bo_base *entry) + { + struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry); +- struct amdgpu_bo *bo = parent->bo, *pbo; ++ struct amdgpu_bo *bo, *pbo; + struct amdgpu_vm *vm = params->vm; + uint64_t pde, pt, flags; + unsigned int level; + ++ if (WARN_ON(!parent)) ++ return -EINVAL; ++ ++ bo = parent->bo; + for (level = 0, pbo = bo->parent; pbo; ++level) + pbo = pbo->parent; + +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +index 3560a3f2c848e..cd594b92c6129 100644 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +@@ -7892,22 +7892,15 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev, + static void gfx_v10_0_update_spm_vmid_internal(struct amdgpu_device *adev, + unsigned int vmid) + { +- u32 reg, data; ++ u32 data; + + /* not for *_SOC15 */ +- reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL); +- if (amdgpu_sriov_is_pp_one_vf(adev)) +- data = RREG32_NO_KIQ(reg); +- else +- data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL); ++ data = RREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL); + + data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK; + data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT; + +- if (amdgpu_sriov_is_pp_one_vf(adev)) +- WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data); +- else +- WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data); ++ WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data); + } + + static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned int vmid) +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +index daab4c7a073ac..c81e98f0d17ff 100644 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +@@ -4961,23 +4961,16 @@ static int 
gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev, + + static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) + { +- u32 reg, data; ++ u32 data; + + amdgpu_gfx_off_ctrl(adev, false); + +- reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL); +- if (amdgpu_sriov_is_pp_one_vf(adev)) +- data = RREG32_NO_KIQ(reg); +- else +- data = RREG32(reg); ++ data = RREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL); + + data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK; + data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT; + +- if (amdgpu_sriov_is_pp_one_vf(adev)) +- WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data); +- else +- WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data); ++ WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data); + + amdgpu_gfx_off_ctrl(adev, true); + } +diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c +index 4ab90c7852c3e..ca123ff553477 100644 +--- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c +@@ -39,7 +39,7 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_4_imu.bin"); + + static int imu_v11_0_init_microcode(struct amdgpu_device *adev) + { +- char fw_name[40]; ++ char fw_name[45]; + char ucode_prefix[30]; + int err; + const struct imu_firmware_header_v1_0 *imu_hdr; +diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +index 1c8116d75f63c..fbe57499495ec 100644 +--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +@@ -543,11 +543,11 @@ void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring, + + amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); +- amdgpu_ring_write(ring, (vmid | (vmid << 4))); ++ amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8))); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); +- amdgpu_ring_write(ring, (vmid | (vmid << 4))); ++ amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8))); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); +diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c +index 1de79d660285d..78aaaee492e11 100644 +--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c ++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c +@@ -23,6 +23,7 @@ + + #include "amdgpu.h" + #include "amdgpu_jpeg.h" ++#include "amdgpu_cs.h" + #include "soc15.h" + #include "soc15d.h" + #include "jpeg_v4_0_3.h" +@@ -769,11 +770,15 @@ static void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring, + + amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); +- amdgpu_ring_write(ring, (vmid | (vmid << 4))); ++ ++ if (ring->funcs->parse_cs) ++ amdgpu_ring_write(ring, 0); ++ else ++ amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8))); + + amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JPEG_VMID_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); +- amdgpu_ring_write(ring, (vmid | (vmid << 4))); ++ amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8))); + + amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); +@@ -1052,6 +1057,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = { + .get_rptr = jpeg_v4_0_3_dec_ring_get_rptr, + .get_wptr = jpeg_v4_0_3_dec_ring_get_wptr, + .set_wptr = jpeg_v4_0_3_dec_ring_set_wptr, ++ .parse_cs = jpeg_v4_0_3_dec_ring_parse_cs, 
+ .emit_frame_size = + SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + +@@ -1216,3 +1222,56 @@ static void jpeg_v4_0_3_set_ras_funcs(struct amdgpu_device *adev) + { + adev->jpeg.ras = &jpeg_v4_0_3_ras; + } ++ ++/** ++ * jpeg_v4_0_3_dec_ring_parse_cs - command submission parser ++ * ++ * @parser: Command submission parser context ++ * @job: the job to parse ++ * @ib: the IB to parse ++ * ++ * Parse the command stream, return -EINVAL for invalid packet, ++ * 0 otherwise ++ */ ++int jpeg_v4_0_3_dec_ring_parse_cs(struct amdgpu_cs_parser *parser, ++ struct amdgpu_job *job, ++ struct amdgpu_ib *ib) ++{ ++ uint32_t i, reg, res, cond, type; ++ struct amdgpu_device *adev = parser->adev; ++ ++ for (i = 0; i < ib->length_dw ; i += 2) { ++ reg = CP_PACKETJ_GET_REG(ib->ptr[i]); ++ res = CP_PACKETJ_GET_RES(ib->ptr[i]); ++ cond = CP_PACKETJ_GET_COND(ib->ptr[i]); ++ type = CP_PACKETJ_GET_TYPE(ib->ptr[i]); ++ ++ if (res) /* only support 0 at the moment */ ++ return -EINVAL; ++ ++ switch (type) { ++ case PACKETJ_TYPE0: ++ if (cond != PACKETJ_CONDITION_CHECK0 || reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END) { ++ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]); ++ return -EINVAL; ++ } ++ break; ++ case PACKETJ_TYPE3: ++ if (cond != PACKETJ_CONDITION_CHECK3 || reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END) { ++ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]); ++ return -EINVAL; ++ } ++ break; ++ case PACKETJ_TYPE6: ++ if (ib->ptr[i] == CP_PACKETJ_NOP) ++ continue; ++ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]); ++ return -EINVAL; ++ default: ++ dev_err(adev->dev, "Unknown packet type %d !\n", type); ++ return -EINVAL; ++ } ++ } ++ ++ return 0; ++} +diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h +index 22483dc663518..9598eda9d7156 100644 +--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h ++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h +@@ -46,6 +46,12 @@ + + #define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000 + ++#define JPEG_REG_RANGE_START 0x4000 ++#define JPEG_REG_RANGE_END 0x41c2 ++ + extern const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block; + ++int jpeg_v4_0_3_dec_ring_parse_cs(struct amdgpu_cs_parser *parser, ++ struct amdgpu_job *job, ++ struct amdgpu_ib *ib); + #endif /* __JPEG_V4_0_3_H__ */ +diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h +index 2357ff39323f0..e74e1983da53a 100644 +--- a/drivers/gpu/drm/amd/amdgpu/soc15d.h ++++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h +@@ -76,6 +76,12 @@ + ((cond & 0xF) << 24) | \ + ((type & 0xF) << 28)) + ++#define CP_PACKETJ_NOP 0x60000000 ++#define CP_PACKETJ_GET_REG(x) ((x) & 0x3FFFF) ++#define CP_PACKETJ_GET_RES(x) (((x) >> 18) & 0x3F) ++#define CP_PACKETJ_GET_COND(x) (((x) >> 24) & 0xF) ++#define CP_PACKETJ_GET_TYPE(x) (((x) >> 28) & 0xF) ++ + /* Packet 3 types */ + #define PACKET3_NOP 0x10 + #define PACKET3_SET_BASE 0x11 +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +index d33ba4fe9ad5b..9d10530283705 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +@@ -1432,17 +1432,23 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep, + goto sync_memory_failed; + } + } +- mutex_unlock(&p->mutex); + +- if (flush_tlb) { +- /* Flush TLBs after waiting for the page table updates to complete */ +- for (i = 0; i < args->n_devices; i++) { +- peer_pdd = kfd_process_device_data_by_id(p, 
devices_arr[i]); +- if (WARN_ON_ONCE(!peer_pdd)) +- continue; ++ /* Flush TLBs after waiting for the page table updates to complete */ ++ for (i = 0; i < args->n_devices; i++) { ++ peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]); ++ if (WARN_ON_ONCE(!peer_pdd)) ++ continue; ++ if (flush_tlb) + kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT); +- } ++ ++ /* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */ ++ err = amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv); ++ if (err) ++ goto sync_memory_failed; + } ++ ++ mutex_unlock(&p->mutex); ++ + kfree(devices_arr); + + return 0; +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +index e3f4d497d32d5..ff38a85c4fa22 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +@@ -3521,7 +3521,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) + (int)hubp->curs_attr.width || pos_cpy.x + <= (int)hubp->curs_attr.width + + pipe_ctx->plane_state->src_rect.x) { +- pos_cpy.x = temp_x + viewport_width; ++ pos_cpy.x = 2 * viewport_width - temp_x; + } + } + } else { +@@ -3614,7 +3614,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) + (int)hubp->curs_attr.width || pos_cpy.x + <= (int)hubp->curs_attr.width + + pipe_ctx->plane_state->src_rect.x) { +- pos_cpy.x = 2 * viewport_width - temp_x; ++ pos_cpy.x = temp_x + viewport_width; + } + } + } else { +diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c +index 8d73cceb485bf..aa4c64eec7b3d 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c +@@ -1756,6 +1756,9 @@ static bool dcn321_resource_construct( + dc->caps.color.mpc.ogam_rom_caps.hlg = 0; + dc->caps.color.mpc.ocsc = 1; + ++ /* Use pipe context based otg sync logic */ ++ dc->config.use_pipe_ctx_sync_logic = true; ++ + dc->config.dc_mode_clk_limit_support = true; + /* read VBIOS LTTPR caps */ + { +diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c +index 7372eae0b0ef8..babb73147adfb 100644 +--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c ++++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c +@@ -1471,9 +1471,9 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, + return -EINVAL; + } + +-static unsigned int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev, +- enum amd_pp_sensors sensor, +- void *query) ++static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev, ++ enum amd_pp_sensors sensor, ++ void *query) + { + int r, size = sizeof(uint32_t); + +@@ -2787,8 +2787,8 @@ static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev, + return sysfs_emit(buf, "vddnb\n"); + } + +-static unsigned int amdgpu_hwmon_get_power(struct device *dev, +- enum amd_pp_sensors sensor) ++static int amdgpu_hwmon_get_power(struct device *dev, ++ enum amd_pp_sensors sensor) + { + struct amdgpu_device *adev = dev_get_drvdata(dev); + unsigned int uw; +@@ -2809,7 +2809,7 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev, + struct device_attribute *attr, + char *buf) + { +- unsigned int val; ++ int val; + + val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_AVG_POWER); + if (val < 0) +@@ -2822,7 +2822,7 @@ static ssize_t amdgpu_hwmon_show_power_input(struct device *dev, + struct device_attribute *attr, + char *buf) + { +- unsigned int val; ++ 
int val; + + val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER); + if (val < 0) +diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c +index 6eed5c4232956..c72d5fbbb0ec4 100644 +--- a/drivers/gpu/drm/bridge/tc358768.c ++++ b/drivers/gpu/drm/bridge/tc358768.c +@@ -9,6 +9,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -157,6 +158,7 @@ struct tc358768_priv { + u32 frs; /* PLL Freqency range for HSCK (post divider) */ + + u32 dsiclk; /* pll_clk / 2 */ ++ u32 pclk; /* incoming pclk rate */ + }; + + static inline struct tc358768_priv *dsi_host_to_tc358768(struct mipi_dsi_host +@@ -380,6 +382,7 @@ static int tc358768_calc_pll(struct tc358768_priv *priv, + priv->prd = best_prd; + priv->frs = frs; + priv->dsiclk = best_pll / 2; ++ priv->pclk = mode->clock * 1000; + + return 0; + } +@@ -638,6 +641,28 @@ static u32 tc358768_ps_to_ns(u32 ps) + return ps / 1000; + } + ++static u32 tc358768_dpi_to_ns(u32 val, u32 pclk) ++{ ++ return (u32)div_u64((u64)val * NANO, pclk); ++} ++ ++/* Convert value in DPI pixel clock units to DSI byte count */ ++static u32 tc358768_dpi_to_dsi_bytes(struct tc358768_priv *priv, u32 val) ++{ ++ u64 m = (u64)val * priv->dsiclk / 4 * priv->dsi_lanes; ++ u64 n = priv->pclk; ++ ++ return (u32)div_u64(m + n - 1, n); ++} ++ ++static u32 tc358768_dsi_bytes_to_ns(struct tc358768_priv *priv, u32 val) ++{ ++ u64 m = (u64)val * NANO; ++ u64 n = priv->dsiclk / 4 * priv->dsi_lanes; ++ ++ return (u32)div_u64(m, n); ++} ++ + static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) + { + struct tc358768_priv *priv = bridge_to_tc358768(bridge); +@@ -647,11 +672,19 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) + s32 raw_val; + const struct drm_display_mode *mode; + u32 hsbyteclk_ps, dsiclk_ps, ui_ps; +- u32 dsiclk, hsbyteclk, video_start; +- const u32 internal_delay = 40; ++ u32 dsiclk, hsbyteclk; + int ret, i; + struct videomode vm; + struct device *dev = priv->dev; ++ /* In pixelclock units */ ++ u32 dpi_htot, dpi_data_start; ++ /* In byte units */ ++ u32 dsi_dpi_htot, dsi_dpi_data_start; ++ u32 dsi_hsw, dsi_hbp, dsi_hact, dsi_hfp; ++ const u32 dsi_hss = 4; /* HSS is a short packet (4 bytes) */ ++ /* In hsbyteclk units */ ++ u32 dsi_vsdly; ++ const u32 internal_dly = 40; + + if (mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) { + dev_warn_once(dev, "Non-continuous mode unimplemented, falling back to continuous\n"); +@@ -686,27 +719,23 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) + case MIPI_DSI_FMT_RGB888: + val |= (0x3 << 4); + hact = vm.hactive * 3; +- video_start = (vm.hsync_len + vm.hback_porch) * 3; + data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24; + break; + case MIPI_DSI_FMT_RGB666: + val |= (0x4 << 4); + hact = vm.hactive * 3; +- video_start = (vm.hsync_len + vm.hback_porch) * 3; + data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18; + break; + + case MIPI_DSI_FMT_RGB666_PACKED: + val |= (0x4 << 4) | BIT(3); + hact = vm.hactive * 18 / 8; +- video_start = (vm.hsync_len + vm.hback_porch) * 18 / 8; + data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18; + break; + + case MIPI_DSI_FMT_RGB565: + val |= (0x5 << 4); + hact = vm.hactive * 2; +- video_start = (vm.hsync_len + vm.hback_porch) * 2; + data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16; + break; + default: +@@ -716,9 +745,152 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) + return; + } + ++ /* ++ * There are three important things to make TC358768 work correctly, ++ * which are not trivial to manage: 
++ * ++ * 1. Keep the DPI line-time and the DSI line-time as close to each ++ * other as possible. ++ * 2. TC358768 goes to LP mode after each line's active area. The DSI ++ * HFP period has to be long enough for entering and exiting LP mode. ++ * But it is not clear how to calculate this. ++ * 3. VSDly (video start delay) has to be long enough to ensure that the ++ * DSI TX does not start transmitting until we have started receiving ++ * pixel data from the DPI input. It is not clear how to calculate ++ * this either. ++ */ ++ ++ dpi_htot = vm.hactive + vm.hfront_porch + vm.hsync_len + vm.hback_porch; ++ dpi_data_start = vm.hsync_len + vm.hback_porch; ++ ++ dev_dbg(dev, "dpi horiz timing (pclk): %u + %u + %u + %u = %u\n", ++ vm.hsync_len, vm.hback_porch, vm.hactive, vm.hfront_porch, ++ dpi_htot); ++ ++ dev_dbg(dev, "dpi horiz timing (ns): %u + %u + %u + %u = %u\n", ++ tc358768_dpi_to_ns(vm.hsync_len, vm.pixelclock), ++ tc358768_dpi_to_ns(vm.hback_porch, vm.pixelclock), ++ tc358768_dpi_to_ns(vm.hactive, vm.pixelclock), ++ tc358768_dpi_to_ns(vm.hfront_porch, vm.pixelclock), ++ tc358768_dpi_to_ns(dpi_htot, vm.pixelclock)); ++ ++ dev_dbg(dev, "dpi data start (ns): %u + %u = %u\n", ++ tc358768_dpi_to_ns(vm.hsync_len, vm.pixelclock), ++ tc358768_dpi_to_ns(vm.hback_porch, vm.pixelclock), ++ tc358768_dpi_to_ns(dpi_data_start, vm.pixelclock)); ++ ++ dsi_dpi_htot = tc358768_dpi_to_dsi_bytes(priv, dpi_htot); ++ dsi_dpi_data_start = tc358768_dpi_to_dsi_bytes(priv, dpi_data_start); ++ ++ if (dsi_dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) { ++ dsi_hsw = tc358768_dpi_to_dsi_bytes(priv, vm.hsync_len); ++ dsi_hbp = tc358768_dpi_to_dsi_bytes(priv, vm.hback_porch); ++ } else { ++ /* HBP is included in HSW in event mode */ ++ dsi_hbp = 0; ++ dsi_hsw = tc358768_dpi_to_dsi_bytes(priv, ++ vm.hsync_len + ++ vm.hback_porch); ++ ++ /* ++ * The pixel packet includes the actual pixel data, and: ++ * DSI packet header = 4 bytes ++ * DCS code = 1 byte ++ * DSI packet footer = 2 bytes ++ */ ++ dsi_hact = hact + 4 + 1 + 2; ++ ++ dsi_hfp = dsi_dpi_htot - dsi_hact - dsi_hsw - dsi_hss; ++ ++ /* ++ * Here we should check if HFP is long enough for entering LP ++ * and exiting LP, but it's not clear how to calculate that. ++ * Instead, this is a naive algorithm that just adjusts the HFP ++ * and HSW so that HFP is (at least) roughly 2/3 of the total ++ * blanking time. ++ */ ++ if (dsi_hfp < (dsi_hfp + dsi_hsw + dsi_hss) * 2 / 3) { ++ u32 old_hfp = dsi_hfp; ++ u32 old_hsw = dsi_hsw; ++ u32 tot = dsi_hfp + dsi_hsw + dsi_hss; ++ ++ dsi_hsw = tot / 3; ++ ++ /* ++ * Seems like sometimes HSW has to be divisible by num-lanes, but ++ * not always... 
++ */ ++ dsi_hsw = roundup(dsi_hsw, priv->dsi_lanes); ++ ++ dsi_hfp = dsi_dpi_htot - dsi_hact - dsi_hsw - dsi_hss; ++ ++ dev_dbg(dev, ++ "hfp too short, adjusting dsi hfp and dsi hsw from %u, %u to %u, %u\n", ++ old_hfp, old_hsw, dsi_hfp, dsi_hsw); ++ } ++ ++ dev_dbg(dev, ++ "dsi horiz timing (bytes): %u, %u + %u + %u + %u = %u\n", ++ dsi_hss, dsi_hsw, dsi_hbp, dsi_hact, dsi_hfp, ++ dsi_hss + dsi_hsw + dsi_hbp + dsi_hact + dsi_hfp); ++ ++ dev_dbg(dev, "dsi horiz timing (ns): %u + %u + %u + %u + %u = %u\n", ++ tc358768_dsi_bytes_to_ns(priv, dsi_hss), ++ tc358768_dsi_bytes_to_ns(priv, dsi_hsw), ++ tc358768_dsi_bytes_to_ns(priv, dsi_hbp), ++ tc358768_dsi_bytes_to_ns(priv, dsi_hact), ++ tc358768_dsi_bytes_to_ns(priv, dsi_hfp), ++ tc358768_dsi_bytes_to_ns(priv, dsi_hss + dsi_hsw + ++ dsi_hbp + dsi_hact + dsi_hfp)); ++ } ++ ++ /* VSDly calculation */ ++ ++ /* Start with the HW internal delay */ ++ dsi_vsdly = internal_dly; ++ ++ /* Convert to byte units as the other variables are in byte units */ ++ dsi_vsdly *= priv->dsi_lanes; ++ ++ /* Do we need more delay, in addition to the internal? */ ++ if (dsi_dpi_data_start > dsi_vsdly + dsi_hss + dsi_hsw + dsi_hbp) { ++ dsi_vsdly = dsi_dpi_data_start - dsi_hss - dsi_hsw - dsi_hbp; ++ dsi_vsdly = roundup(dsi_vsdly, priv->dsi_lanes); ++ } ++ ++ dev_dbg(dev, "dsi data start (bytes) %u + %u + %u + %u = %u\n", ++ dsi_vsdly, dsi_hss, dsi_hsw, dsi_hbp, ++ dsi_vsdly + dsi_hss + dsi_hsw + dsi_hbp); ++ ++ dev_dbg(dev, "dsi data start (ns) %u + %u + %u + %u = %u\n", ++ tc358768_dsi_bytes_to_ns(priv, dsi_vsdly), ++ tc358768_dsi_bytes_to_ns(priv, dsi_hss), ++ tc358768_dsi_bytes_to_ns(priv, dsi_hsw), ++ tc358768_dsi_bytes_to_ns(priv, dsi_hbp), ++ tc358768_dsi_bytes_to_ns(priv, dsi_vsdly + dsi_hss + dsi_hsw + dsi_hbp)); ++ ++ /* Convert back to hsbyteclk */ ++ dsi_vsdly /= priv->dsi_lanes; ++ ++ /* ++ * The docs say that there is an internal delay of 40 cycles. ++ * However, we get underflows if we follow that rule. If we ++ * instead ignore the internal delay, things work. So either ++ * the docs are wrong or the calculations are wrong. ++ * ++ * As a temporary fix, add the internal delay here, to counter ++ * the subtraction when writing the register. 
++ */ ++ dsi_vsdly += internal_dly; ++ ++ /* Clamp to the register max */ ++ if (dsi_vsdly - internal_dly > 0x3ff) { ++ dev_warn(dev, "VSDly too high, underflows likely\n"); ++ dsi_vsdly = 0x3ff + internal_dly; ++ } ++ + /* VSDly[9:0] */ +- video_start = max(video_start, internal_delay + 1) - internal_delay; +- tc358768_write(priv, TC358768_VSDLY, video_start); ++ tc358768_write(priv, TC358768_VSDLY, dsi_vsdly - internal_dly); + + tc358768_write(priv, TC358768_DATAFMT, val); + tc358768_write(priv, TC358768_DSITX_DT, data_type); +@@ -826,18 +998,6 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) + + /* vbp */ + tc358768_write(priv, TC358768_DSI_VBPR, vm.vback_porch); +- +- /* hsw * byteclk * ndl / pclk */ +- val = (u32)div_u64(vm.hsync_len * +- (u64)hsbyteclk * priv->dsi_lanes, +- vm.pixelclock); +- tc358768_write(priv, TC358768_DSI_HSW, val); +- +- /* hbp * byteclk * ndl / pclk */ +- val = (u32)div_u64(vm.hback_porch * +- (u64)hsbyteclk * priv->dsi_lanes, +- vm.pixelclock); +- tc358768_write(priv, TC358768_DSI_HBPR, val); + } else { + /* Set event mode */ + tc358768_write(priv, TC358768_DSI_EVENT, 1); +@@ -851,16 +1011,13 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) + + /* vbp (not used in event mode) */ + tc358768_write(priv, TC358768_DSI_VBPR, 0); ++ } + +- /* (hsw + hbp) * byteclk * ndl / pclk */ +- val = (u32)div_u64((vm.hsync_len + vm.hback_porch) * +- (u64)hsbyteclk * priv->dsi_lanes, +- vm.pixelclock); +- tc358768_write(priv, TC358768_DSI_HSW, val); ++ /* hsw (bytes) */ ++ tc358768_write(priv, TC358768_DSI_HSW, dsi_hsw); + +- /* hbp (not used in event mode) */ +- tc358768_write(priv, TC358768_DSI_HBPR, 0); +- } ++ /* hbp (bytes) */ ++ tc358768_write(priv, TC358768_DSI_HBPR, dsi_hbp); + + /* hact (bytes) */ + tc358768_write(priv, TC358768_DSI_HACT, hact); +diff --git a/drivers/gpu/drm/lima/lima_gp.c b/drivers/gpu/drm/lima/lima_gp.c +index ca3842f719842..82071835ec9ed 100644 +--- a/drivers/gpu/drm/lima/lima_gp.c ++++ b/drivers/gpu/drm/lima/lima_gp.c +@@ -166,6 +166,11 @@ static void lima_gp_task_run(struct lima_sched_pipe *pipe, + gp_write(LIMA_GP_CMD, cmd); + } + ++static int lima_gp_bus_stop_poll(struct lima_ip *ip) ++{ ++ return !!(gp_read(LIMA_GP_STATUS) & LIMA_GP_STATUS_BUS_STOPPED); ++} ++ + static int lima_gp_hard_reset_poll(struct lima_ip *ip) + { + gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC01A0000); +@@ -179,6 +184,13 @@ static int lima_gp_hard_reset(struct lima_ip *ip) + + gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC0FFE000); + gp_write(LIMA_GP_INT_MASK, 0); ++ ++ gp_write(LIMA_GP_CMD, LIMA_GP_CMD_STOP_BUS); ++ ret = lima_poll_timeout(ip, lima_gp_bus_stop_poll, 10, 100); ++ if (ret) { ++ dev_err(dev->dev, "%s bus stop timeout\n", lima_ip_name(ip)); ++ return ret; ++ } + gp_write(LIMA_GP_CMD, LIMA_GP_CMD_RESET); + ret = lima_poll_timeout(ip, lima_gp_hard_reset_poll, 10, 100); + if (ret) { +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +index e454b80907121..6262ec5e40204 100644 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +@@ -124,6 +124,8 @@ enum dpu_enc_rc_states { + * @base: drm_encoder base class for registration with DRM + * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes + * @enabled: True if the encoder is active, protected by enc_lock ++ * @commit_done_timedout: True if there has been a timeout on commit after ++ * enabling the encoder. + * @num_phys_encs: Actual number of physical encoders contained. 
+ * @phys_encs: Container of physical encoders managed. + * @cur_master: Pointer to the current master in this mode. Optimization +@@ -172,6 +174,7 @@ struct dpu_encoder_virt { + spinlock_t enc_spinlock; + + bool enabled; ++ bool commit_done_timedout; + + unsigned int num_phys_encs; + struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL]; +@@ -1116,8 +1119,6 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc, + + cstate->num_mixers = num_lm; + +- dpu_enc->connector = conn_state->connector; +- + for (i = 0; i < dpu_enc->num_phys_encs; i++) { + struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; + +@@ -1210,6 +1211,11 @@ static void dpu_encoder_virt_atomic_enable(struct drm_encoder *drm_enc, + dpu_enc->dsc = dpu_encoder_get_dsc_config(drm_enc); + + mutex_lock(&dpu_enc->enc_lock); ++ ++ dpu_enc->commit_done_timedout = false; ++ ++ dpu_enc->connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc); ++ + cur_mode = &dpu_enc->base.crtc->state->adjusted_mode; + + trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay, +@@ -1265,7 +1271,7 @@ static void dpu_encoder_virt_atomic_disable(struct drm_encoder *drm_enc, + trace_dpu_enc_disable(DRMID(drm_enc)); + + /* wait for idle */ +- dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE); ++ dpu_encoder_wait_for_tx_complete(drm_enc); + + dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP); + +@@ -2172,6 +2178,7 @@ static void dpu_encoder_early_unregister(struct drm_encoder *encoder) + } + + static int dpu_encoder_virt_add_phys_encs( ++ struct drm_device *dev, + struct msm_display_info *disp_info, + struct dpu_encoder_virt *dpu_enc, + struct dpu_enc_phys_init_params *params) +@@ -2193,7 +2200,7 @@ static int dpu_encoder_virt_add_phys_encs( + + + if (disp_info->intf_type == INTF_WB) { +- enc = dpu_encoder_phys_wb_init(params); ++ enc = dpu_encoder_phys_wb_init(dev, params); + + if (IS_ERR(enc)) { + DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n", +@@ -2204,7 +2211,7 @@ static int dpu_encoder_virt_add_phys_encs( + dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; + ++dpu_enc->num_phys_encs; + } else if (disp_info->is_cmd_mode) { +- enc = dpu_encoder_phys_cmd_init(params); ++ enc = dpu_encoder_phys_cmd_init(dev, params); + + if (IS_ERR(enc)) { + DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n", +@@ -2215,7 +2222,7 @@ static int dpu_encoder_virt_add_phys_encs( + dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; + ++dpu_enc->num_phys_encs; + } else { +- enc = dpu_encoder_phys_vid_init(params); ++ enc = dpu_encoder_phys_vid_init(dev, params); + + if (IS_ERR(enc)) { + DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n", +@@ -2304,7 +2311,7 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc, + break; + } + +- ret = dpu_encoder_virt_add_phys_encs(disp_info, ++ ret = dpu_encoder_virt_add_phys_encs(dpu_kms->dev, disp_info, + dpu_enc, &phys_params); + if (ret) { + DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n"); +@@ -2416,10 +2423,18 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev, + return ERR_PTR(ret); + } + +-int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc, +- enum msm_event_wait event) ++/** ++ * dpu_encoder_wait_for_commit_done() - Wait for encoder to flush pending state ++ * @drm_enc: encoder pointer ++ * ++ * Wait for hardware to have flushed the current pending changes to hardware at ++ * a vblank or CTL_START. 
Physical encoders will map this differently depending ++ * on the type: vid mode -> vsync_irq, cmd mode -> CTL_START. ++ * ++ * Return: 0 on success, -EWOULDBLOCK if already signaled, error otherwise ++ */ ++int dpu_encoder_wait_for_commit_done(struct drm_encoder *drm_enc) + { +- int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL; + struct dpu_encoder_virt *dpu_enc = NULL; + int i, ret = 0; + +@@ -2433,26 +2448,51 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc, + for (i = 0; i < dpu_enc->num_phys_encs; i++) { + struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; + +- switch (event) { +- case MSM_ENC_COMMIT_DONE: +- fn_wait = phys->ops.wait_for_commit_done; +- break; +- case MSM_ENC_TX_COMPLETE: +- fn_wait = phys->ops.wait_for_tx_complete; +- break; +- case MSM_ENC_VBLANK: +- fn_wait = phys->ops.wait_for_vblank; +- break; +- default: +- DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n", +- event); +- return -EINVAL; ++ if (phys->ops.wait_for_commit_done) { ++ DPU_ATRACE_BEGIN("wait_for_commit_done"); ++ ret = phys->ops.wait_for_commit_done(phys); ++ DPU_ATRACE_END("wait_for_commit_done"); ++ if (ret == -ETIMEDOUT && !dpu_enc->commit_done_timedout) { ++ dpu_enc->commit_done_timedout = true; ++ msm_disp_snapshot_state(drm_enc->dev); ++ } ++ if (ret) ++ return ret; + } ++ } ++ ++ return ret; ++} ++ ++/** ++ * dpu_encoder_wait_for_tx_complete() - Wait for encoder to transfer pixels to panel ++ * @drm_enc: encoder pointer ++ * ++ * Wait for the hardware to transfer all the pixels to the panel. Physical ++ * encoders will map this differently depending on the type: vid mode -> vsync_irq, ++ * cmd mode -> pp_done. ++ * ++ * Return: 0 on success, -EWOULDBLOCK if already signaled, error otherwise ++ */ ++int dpu_encoder_wait_for_tx_complete(struct drm_encoder *drm_enc) ++{ ++ struct dpu_encoder_virt *dpu_enc = NULL; ++ int i, ret = 0; ++ ++ if (!drm_enc) { ++ DPU_ERROR("invalid encoder\n"); ++ return -EINVAL; ++ } ++ dpu_enc = to_dpu_encoder_virt(drm_enc); ++ DPU_DEBUG_ENC(dpu_enc, "\n"); ++ ++ for (i = 0; i < dpu_enc->num_phys_encs; i++) { ++ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; + +- if (fn_wait) { +- DPU_ATRACE_BEGIN("wait_for_completion_event"); +- ret = fn_wait(phys); +- DPU_ATRACE_END("wait_for_completion_event"); ++ if (phys->ops.wait_for_tx_complete) { ++ DPU_ATRACE_BEGIN("wait_for_tx_complete"); ++ ret = phys->ops.wait_for_tx_complete(phys); ++ DPU_ATRACE_END("wait_for_tx_complete"); + if (ret) + return ret; + } +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h +index fe6b1d312a742..0c928d1876e4a 100644 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h +@@ -93,25 +93,9 @@ void dpu_encoder_kickoff(struct drm_encoder *encoder); + */ + int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time); + +-/** +- * dpu_encoder_wait_for_event - Waits for encoder events +- * @encoder: encoder pointer +- * @event: event to wait for +- * MSM_ENC_COMMIT_DONE - Wait for hardware to have flushed the current pending +- * frames to hardware at a vblank or ctl_start +- * Encoders will map this differently depending on the +- * panel type. +- * vid mode -> vsync_irq +- * cmd mode -> ctl_start +- * MSM_ENC_TX_COMPLETE - Wait for the hardware to transfer all the pixels to +- * the panel. Encoders will map this differently +- * depending on the panel type. 
+- * vid mode -> vsync_irq +- * cmd mode -> pp_done +- * Returns: 0 on success, -EWOULDBLOCK if already signaled, error otherwise +- */ +-int dpu_encoder_wait_for_event(struct drm_encoder *drm_encoder, +- enum msm_event_wait event); ++int dpu_encoder_wait_for_commit_done(struct drm_encoder *drm_encoder); ++ ++int dpu_encoder_wait_for_tx_complete(struct drm_encoder *drm_encoder); + + /* + * dpu_encoder_get_intf_mode - get interface mode of the given encoder +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h +index f91661a698882..57a3598f2a303 100644 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h +@@ -106,7 +106,6 @@ struct dpu_encoder_phys_ops { + int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable); + int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc); + int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc); +- int (*wait_for_vblank)(struct dpu_encoder_phys *phys_enc); + void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc); + void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc); + void (*trigger_start)(struct dpu_encoder_phys *phys_enc); +@@ -281,22 +280,24 @@ struct dpu_encoder_wait_info { + * @p: Pointer to init params structure + * Return: Error code or newly allocated encoder + */ +-struct dpu_encoder_phys *dpu_encoder_phys_vid_init( ++struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev, + struct dpu_enc_phys_init_params *p); + + /** + * dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder ++ * @dev: Corresponding device for devres management + * @p: Pointer to init params structure + * Return: Error code or newly allocated encoder + */ +-struct dpu_encoder_phys *dpu_encoder_phys_cmd_init( ++struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev, + struct dpu_enc_phys_init_params *p); + + /** + * dpu_encoder_phys_wb_init - initialize writeback encoder ++ * @dev: Corresponding device for devres management + * @init: Pointer to init info structure with initialization params + */ +-struct dpu_encoder_phys *dpu_encoder_phys_wb_init( ++struct dpu_encoder_phys *dpu_encoder_phys_wb_init(struct drm_device *dev, + struct dpu_enc_phys_init_params *p); + + /** +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c +index 718421306247f..83a804ebf8d7e 100644 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c +@@ -13,6 +13,8 @@ + #include "dpu_trace.h" + #include "disp/msm_disp_snapshot.h" + ++#include ++ + #define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \ + (e) && (e)->base.parent ? 
\ + (e)->base.parent->base.id : -1, \ +@@ -564,14 +566,6 @@ static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc) + phys_enc->enable_state = DPU_ENC_DISABLED; + } + +-static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc) +-{ +- struct dpu_encoder_phys_cmd *cmd_enc = +- to_dpu_encoder_phys_cmd(phys_enc); +- +- kfree(cmd_enc); +-} +- + static void dpu_encoder_phys_cmd_prepare_for_kickoff( + struct dpu_encoder_phys *phys_enc) + { +@@ -687,33 +681,6 @@ static int dpu_encoder_phys_cmd_wait_for_commit_done( + return _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc); + } + +-static int dpu_encoder_phys_cmd_wait_for_vblank( +- struct dpu_encoder_phys *phys_enc) +-{ +- int rc = 0; +- struct dpu_encoder_phys_cmd *cmd_enc; +- struct dpu_encoder_wait_info wait_info; +- +- cmd_enc = to_dpu_encoder_phys_cmd(phys_enc); +- +- /* only required for master controller */ +- if (!dpu_encoder_phys_cmd_is_master(phys_enc)) +- return rc; +- +- wait_info.wq = &cmd_enc->pending_vblank_wq; +- wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt; +- wait_info.timeout_ms = KICKOFF_TIMEOUT_MS; +- +- atomic_inc(&cmd_enc->pending_vblank_cnt); +- +- rc = dpu_encoder_helper_wait_for_irq(phys_enc, +- phys_enc->irq[INTR_IDX_RDPTR], +- dpu_encoder_phys_cmd_te_rd_ptr_irq, +- &wait_info); +- +- return rc; +-} +- + static void dpu_encoder_phys_cmd_handle_post_kickoff( + struct dpu_encoder_phys *phys_enc) + { +@@ -737,12 +704,10 @@ static void dpu_encoder_phys_cmd_init_ops( + ops->atomic_mode_set = dpu_encoder_phys_cmd_atomic_mode_set; + ops->enable = dpu_encoder_phys_cmd_enable; + ops->disable = dpu_encoder_phys_cmd_disable; +- ops->destroy = dpu_encoder_phys_cmd_destroy; + ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq; + ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done; + ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff; + ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete; +- ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank; + ops->trigger_start = dpu_encoder_phys_cmd_trigger_start; + ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush; + ops->irq_control = dpu_encoder_phys_cmd_irq_control; +@@ -752,7 +717,7 @@ static void dpu_encoder_phys_cmd_init_ops( + ops->get_line_count = dpu_encoder_phys_cmd_get_line_count; + } + +-struct dpu_encoder_phys *dpu_encoder_phys_cmd_init( ++struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev, + struct dpu_enc_phys_init_params *p) + { + struct dpu_encoder_phys *phys_enc = NULL; +@@ -760,7 +725,7 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init( + + DPU_DEBUG("intf\n"); + +- cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL); ++ cmd_enc = drmm_kzalloc(dev, sizeof(*cmd_enc), GFP_KERNEL); + if (!cmd_enc) { + DPU_ERROR("failed to allocate\n"); + return ERR_PTR(-ENOMEM); +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c +index aec3ca4aa0fb7..daaf0e6047538 100644 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c +@@ -11,6 +11,8 @@ + #include "dpu_trace.h" + #include "disp/msm_disp_snapshot.h" + ++#include ++ + #define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \ + (e) && (e)->parent ? 
\ + (e)->parent->base.id : -1, \ +@@ -441,13 +443,7 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc) + phys_enc->enable_state = DPU_ENC_ENABLING; + } + +-static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc) +-{ +- DPU_DEBUG_VIDENC(phys_enc, "\n"); +- kfree(phys_enc); +-} +- +-static int dpu_encoder_phys_vid_wait_for_vblank( ++static int dpu_encoder_phys_vid_wait_for_tx_complete( + struct dpu_encoder_phys *phys_enc) + { + struct dpu_encoder_wait_info wait_info; +@@ -561,7 +557,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc) + * scanout buffer) don't latch properly.. + */ + if (dpu_encoder_phys_vid_is_master(phys_enc)) { +- ret = dpu_encoder_phys_vid_wait_for_vblank(phys_enc); ++ ret = dpu_encoder_phys_vid_wait_for_tx_complete(phys_enc); + if (ret) { + atomic_set(&phys_enc->pending_kickoff_cnt, 0); + DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n", +@@ -581,7 +577,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc) + spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); + dpu_encoder_phys_inc_pending(phys_enc); + spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); +- ret = dpu_encoder_phys_vid_wait_for_vblank(phys_enc); ++ ret = dpu_encoder_phys_vid_wait_for_tx_complete(phys_enc); + if (ret) { + atomic_set(&phys_enc->pending_kickoff_cnt, 0); + DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n", +@@ -684,11 +680,9 @@ static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops) + ops->atomic_mode_set = dpu_encoder_phys_vid_atomic_mode_set; + ops->enable = dpu_encoder_phys_vid_enable; + ops->disable = dpu_encoder_phys_vid_disable; +- ops->destroy = dpu_encoder_phys_vid_destroy; + ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq; + ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_commit_done; +- ops->wait_for_vblank = dpu_encoder_phys_vid_wait_for_vblank; +- ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_vblank; ++ ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_tx_complete; + ops->irq_control = dpu_encoder_phys_vid_irq_control; + ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff; + ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff; +@@ -697,7 +691,7 @@ static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops) + ops->get_frame_count = dpu_encoder_phys_vid_get_frame_count; + } + +-struct dpu_encoder_phys *dpu_encoder_phys_vid_init( ++struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev, + struct dpu_enc_phys_init_params *p) + { + struct dpu_encoder_phys *phys_enc = NULL; +@@ -707,7 +701,7 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init( + return ERR_PTR(-EINVAL); + } + +- phys_enc = kzalloc(sizeof(*phys_enc), GFP_KERNEL); ++ phys_enc = drmm_kzalloc(dev, sizeof(*phys_enc), GFP_KERNEL); + if (!phys_enc) { + DPU_ERROR("failed to create encoder due to memory allocation error\n"); + return ERR_PTR(-ENOMEM); +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c +index a81a9ee71a86c..0a45c546b03f2 100644 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c +@@ -8,6 +8,7 @@ + #include + + #include ++#include + + #include "dpu_encoder_phys.h" + #include "dpu_formats.h" +@@ -546,20 +547,6 @@ static void dpu_encoder_phys_wb_disable(struct dpu_encoder_phys *phys_enc) + phys_enc->enable_state = 
DPU_ENC_DISABLED; + } + +-/** +- * dpu_encoder_phys_wb_destroy - destroy writeback encoder +- * @phys_enc: Pointer to physical encoder +- */ +-static void dpu_encoder_phys_wb_destroy(struct dpu_encoder_phys *phys_enc) +-{ +- if (!phys_enc) +- return; +- +- DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0); +- +- kfree(phys_enc); +-} +- + static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc, + struct drm_writeback_job *job) + { +@@ -655,7 +642,6 @@ static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops) + ops->atomic_mode_set = dpu_encoder_phys_wb_atomic_mode_set; + ops->enable = dpu_encoder_phys_wb_enable; + ops->disable = dpu_encoder_phys_wb_disable; +- ops->destroy = dpu_encoder_phys_wb_destroy; + ops->atomic_check = dpu_encoder_phys_wb_atomic_check; + ops->wait_for_commit_done = dpu_encoder_phys_wb_wait_for_commit_done; + ops->prepare_for_kickoff = dpu_encoder_phys_wb_prepare_for_kickoff; +@@ -671,9 +657,10 @@ static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops) + + /** + * dpu_encoder_phys_wb_init - initialize writeback encoder ++ * @dev: Corresponding device for devres management + * @p: Pointer to init info structure with initialization params + */ +-struct dpu_encoder_phys *dpu_encoder_phys_wb_init( ++struct dpu_encoder_phys *dpu_encoder_phys_wb_init(struct drm_device *dev, + struct dpu_enc_phys_init_params *p) + { + struct dpu_encoder_phys *phys_enc = NULL; +@@ -686,7 +673,7 @@ struct dpu_encoder_phys *dpu_encoder_phys_wb_init( + return ERR_PTR(-EINVAL); + } + +- wb_enc = kzalloc(sizeof(*wb_enc), GFP_KERNEL); ++ wb_enc = drmm_kzalloc(dev, sizeof(*wb_enc), GFP_KERNEL); + if (!wb_enc) { + DPU_ERROR("failed to allocate wb phys_enc enc\n"); + return ERR_PTR(-ENOMEM); +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +index aa6ba2cf4b840..6ba289e04b3b2 100644 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +@@ -490,7 +490,7 @@ static void dpu_kms_wait_for_commit_done(struct msm_kms *kms, + * mode panels. This may be a no-op for command mode panels. + */ + trace_dpu_kms_wait_for_commit_done(DRMID(crtc)); +- ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE); ++ ret = dpu_encoder_wait_for_commit_done(encoder); + if (ret && ret != -EWOULDBLOCK) { + DPU_ERROR("wait for commit done returned %d\n", ret); + break; +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h +index f5473d4dea92f..8cb3cf842c52c 100644 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h +@@ -31,24 +31,14 @@ + * @fmt: Pointer to format string + */ + #define DPU_DEBUG(fmt, ...) \ +- do { \ +- if (drm_debug_enabled(DRM_UT_KMS)) \ +- DRM_DEBUG(fmt, ##__VA_ARGS__); \ +- else \ +- pr_debug(fmt, ##__VA_ARGS__); \ +- } while (0) ++ DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__) + + /** + * DPU_DEBUG_DRIVER - macro for hardware driver logging + * @fmt: Pointer to format string + */ + #define DPU_DEBUG_DRIVER(fmt, ...) \ +- do { \ +- if (drm_debug_enabled(DRM_UT_DRIVER)) \ +- DRM_ERROR(fmt, ##__VA_ARGS__); \ +- else \ +- pr_debug(fmt, ##__VA_ARGS__); \ +- } while (0) ++ DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__) + + #define DPU_ERROR(fmt, ...) pr_err("[dpu error]" fmt, ##__VA_ARGS__) + #define DPU_ERROR_RATELIMITED(fmt, ...) 
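Editor's sketch (not part of the patch): all three phys-encoder constructors above move from kzalloc() plus a ->destroy() callback to drmm_kzalloc(), tying the allocation to the drm_device lifetime. A minimal sketch of that devres pattern, with a hypothetical structure:

        #include <drm/drm_managed.h>
        #include <linux/err.h>

        struct example_phys {
                struct drm_encoder *parent;
        };

        static struct example_phys *example_phys_init(struct drm_device *dev)
        {
                /* freed automatically when the drm_device is released,
                 * so no ->destroy() callback or kfree() is needed */
                struct example_phys *phys = drmm_kzalloc(dev, sizeof(*phys),
                                                         GFP_KERNEL);

                if (!phys)
                        return ERR_PTR(-ENOMEM);

                return phys;
        }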
pr_err_ratelimited("[dpu error]" fmt, ##__VA_ARGS__) +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +index 0be195f9149c5..637f50a8d42e6 100644 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +@@ -679,6 +679,9 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane, + new_state->fb, &layout); + if (ret) { + DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret); ++ if (pstate->aspace) ++ msm_framebuffer_cleanup(new_state->fb, pstate->aspace, ++ pstate->needs_dirtyfb); + return ret; + } + +@@ -792,6 +795,8 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, + plane); + int ret = 0, min_scale; + struct dpu_plane *pdpu = to_dpu_plane(plane); ++ struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base); ++ u64 max_mdp_clk_rate = kms->perf.max_core_clk_rate; + struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state); + struct dpu_sw_pipe *pipe = &pstate->pipe; + struct dpu_sw_pipe *r_pipe = &pstate->r_pipe; +@@ -860,14 +865,20 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, + + max_linewidth = pdpu->catalog->caps->max_linewidth; + +- if (drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) { ++ drm_rect_rotate(&pipe_cfg->src_rect, ++ new_plane_state->fb->width, new_plane_state->fb->height, ++ new_plane_state->rotation); ++ ++ if ((drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) || ++ _dpu_plane_calc_clk(&crtc_state->adjusted_mode, pipe_cfg) > max_mdp_clk_rate) { + /* + * In parallel multirect case only the half of the usual width + * is supported for tiled formats. If we are here, we know that + * full width is more than max_linewidth, thus each rect is + * wider than allowed. + */ +- if (DPU_FORMAT_IS_UBWC(fmt)) { ++ if (DPU_FORMAT_IS_UBWC(fmt) && ++ drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) { + DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u, tiled format\n", + DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth); + return -E2BIG; +@@ -907,6 +918,14 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, + r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2; + } + ++ drm_rect_rotate_inv(&pipe_cfg->src_rect, ++ new_plane_state->fb->width, new_plane_state->fb->height, ++ new_plane_state->rotation); ++ if (r_pipe->sspp) ++ drm_rect_rotate_inv(&r_pipe_cfg->src_rect, ++ new_plane_state->fb->width, new_plane_state->fb->height, ++ new_plane_state->rotation); ++ + ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt, &crtc_state->adjusted_mode); + if (ret) + return ret; +diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c +index 780e9747be1fb..7472dfd631b83 100644 +--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c ++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c +@@ -1253,6 +1253,8 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl, + link_info.rate = ctrl->link->link_params.rate; + link_info.capabilities = DP_LINK_CAP_ENHANCED_FRAMING; + ++ dp_link_reset_phy_params_vx_px(ctrl->link); ++ + dp_aux_link_configure(ctrl->aux, &link_info); + + if (drm_dp_max_downspread(dpcd)) +diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c +index 86a8e06c7a60f..d26589eb8b218 100644 +--- a/drivers/gpu/drm/msm/dp/dp_panel.c ++++ b/drivers/gpu/drm/msm/dp/dp_panel.c +@@ -136,22 +136,22 @@ static int dp_panel_read_dpcd(struct dp_panel *dp_panel) + static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel, + u32 mode_edid_bpp, u32 mode_pclk_khz) + { +- struct dp_link_info *link_info; ++ 
const struct dp_link_info *link_info; + const u32 max_supported_bpp = 30, min_supported_bpp = 18; +- u32 bpp = 0, data_rate_khz = 0; ++ u32 bpp, data_rate_khz; + +- bpp = min_t(u32, mode_edid_bpp, max_supported_bpp); ++ bpp = min(mode_edid_bpp, max_supported_bpp); + + link_info = &dp_panel->link_info; + data_rate_khz = link_info->num_lanes * link_info->rate * 8; + +- while (bpp > min_supported_bpp) { ++ do { + if (mode_pclk_khz * bpp <= data_rate_khz) +- break; ++ return bpp; + bpp -= 6; +- } ++ } while (bpp > min_supported_bpp); + +- return bpp; ++ return min_supported_bpp; + } + + static int dp_panel_update_modes(struct drm_connector *connector, +@@ -444,8 +444,9 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel) + drm_mode->clock); + drm_dbg_dp(panel->drm_dev, "bpp = %d\n", dp_panel->dp_mode.bpp); + +- dp_panel->dp_mode.bpp = max_t(u32, 18, +- min_t(u32, dp_panel->dp_mode.bpp, 30)); ++ dp_panel->dp_mode.bpp = dp_panel_get_mode_bpp(dp_panel, dp_panel->dp_mode.bpp, ++ dp_panel->dp_mode.drm_mode.clock); ++ + drm_dbg_dp(panel->drm_dev, "updated bpp = %d\n", + dp_panel->dp_mode.bpp); + +diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h +index 02fd6c7d0bb7b..48e1a8c6942c9 100644 +--- a/drivers/gpu/drm/msm/msm_drv.h ++++ b/drivers/gpu/drm/msm/msm_drv.h +@@ -74,18 +74,6 @@ enum msm_dsi_controller { + #define MSM_GPU_MAX_RINGS 4 + #define MAX_H_TILES_PER_DISPLAY 2 + +-/** +- * enum msm_event_wait - type of HW events to wait for +- * @MSM_ENC_COMMIT_DONE - wait for the driver to flush the registers to HW +- * @MSM_ENC_TX_COMPLETE - wait for the HW to transfer the frame to panel +- * @MSM_ENC_VBLANK - wait for the HW VBLANK event (for driver-internal waiters) +- */ +-enum msm_event_wait { +- MSM_ENC_COMMIT_DONE = 0, +- MSM_ENC_TX_COMPLETE, +- MSM_ENC_VBLANK, +-}; +- + /** + * struct msm_display_topology - defines a display topology pipeline + * @num_lm: number of layer mixers used +diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c +index f38296ad87434..0641f5bb8649a 100644 +--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c ++++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c +@@ -76,7 +76,7 @@ static bool + wait_for_idle(struct drm_gem_object *obj) + { + enum dma_resv_usage usage = dma_resv_usage_rw(true); +- return dma_resv_wait_timeout(obj->resv, usage, false, 1000) > 0; ++ return dma_resv_wait_timeout(obj->resv, usage, false, 10) > 0; + } + + static bool +diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c +index 348c66b146834..83b49d489cb1c 100644 +--- a/drivers/gpu/drm/msm/msm_mdss.c ++++ b/drivers/gpu/drm/msm/msm_mdss.c +@@ -28,6 +28,8 @@ + + #define MIN_IB_BW 400000000UL /* Min ib vote 400MB */ + ++#define DEFAULT_REG_BW 153600 /* Used in mdss fbdev driver */ ++ + struct msm_mdss { + struct device *dev; + +@@ -40,8 +42,9 @@ struct msm_mdss { + struct irq_domain *domain; + } irq_controller; + const struct msm_mdss_data *mdss_data; +- struct icc_path *path[2]; +- u32 num_paths; ++ struct icc_path *mdp_path[2]; ++ u32 num_mdp_paths; ++ struct icc_path *reg_bus_path; + }; + + static int msm_mdss_parse_data_bus_icc_path(struct device *dev, +@@ -49,38 +52,34 @@ static int msm_mdss_parse_data_bus_icc_path(struct device *dev, + { + struct icc_path *path0; + struct icc_path *path1; ++ struct icc_path *reg_bus_path; + +- path0 = of_icc_get(dev, "mdp0-mem"); ++ path0 = devm_of_icc_get(dev, "mdp0-mem"); + if (IS_ERR_OR_NULL(path0)) + return PTR_ERR_OR_ZERO(path0); + +- msm_mdss->path[0] = path0; +- 
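Editor's sketch (not part of the patch): the reworked dp_panel_get_supported_bpp() above now always returns from inside the loop or falls back to the documented floor. The same selection logic, detached from the driver types:

        static unsigned int pick_bpp(unsigned int edid_bpp, unsigned int pclk_khz,
                                     unsigned int data_rate_khz)
        {
                const unsigned int max_bpp = 30, min_bpp = 18;
                unsigned int bpp = edid_bpp < max_bpp ? edid_bpp : max_bpp;

                /* step down in 6-bpp increments until the mode fits the link */
                do {
                        if (pclk_khz * bpp <= data_rate_khz)
                                return bpp;
                        bpp -= 6;
                } while (bpp > min_bpp);

                return min_bpp;     /* nothing fit; report the minimum */
        }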
msm_mdss->num_paths = 1; ++ msm_mdss->mdp_path[0] = path0; ++ msm_mdss->num_mdp_paths = 1; + +- path1 = of_icc_get(dev, "mdp1-mem"); ++ path1 = devm_of_icc_get(dev, "mdp1-mem"); + if (!IS_ERR_OR_NULL(path1)) { +- msm_mdss->path[1] = path1; +- msm_mdss->num_paths++; ++ msm_mdss->mdp_path[1] = path1; ++ msm_mdss->num_mdp_paths++; + } + +- return 0; +-} +- +-static void msm_mdss_put_icc_path(void *data) +-{ +- struct msm_mdss *msm_mdss = data; +- int i; ++ reg_bus_path = of_icc_get(dev, "cpu-cfg"); ++ if (!IS_ERR_OR_NULL(reg_bus_path)) ++ msm_mdss->reg_bus_path = reg_bus_path; + +- for (i = 0; i < msm_mdss->num_paths; i++) +- icc_put(msm_mdss->path[i]); ++ return 0; + } + + static void msm_mdss_icc_request_bw(struct msm_mdss *msm_mdss, unsigned long bw) + { + int i; + +- for (i = 0; i < msm_mdss->num_paths; i++) +- icc_set_bw(msm_mdss->path[i], 0, Bps_to_icc(bw)); ++ for (i = 0; i < msm_mdss->num_mdp_paths; i++) ++ icc_set_bw(msm_mdss->mdp_path[i], 0, Bps_to_icc(bw)); + } + + static void msm_mdss_irq(struct irq_desc *desc) +@@ -245,6 +244,13 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss) + */ + msm_mdss_icc_request_bw(msm_mdss, MIN_IB_BW); + ++ if (msm_mdss->mdss_data && msm_mdss->mdss_data->reg_bus_bw) ++ icc_set_bw(msm_mdss->reg_bus_path, 0, ++ msm_mdss->mdss_data->reg_bus_bw); ++ else ++ icc_set_bw(msm_mdss->reg_bus_path, 0, ++ DEFAULT_REG_BW); ++ + ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks); + if (ret) { + dev_err(msm_mdss->dev, "clock enable failed, ret:%d\n", ret); +@@ -298,6 +304,9 @@ static int msm_mdss_disable(struct msm_mdss *msm_mdss) + clk_bulk_disable_unprepare(msm_mdss->num_clocks, msm_mdss->clocks); + msm_mdss_icc_request_bw(msm_mdss, 0); + ++ if (msm_mdss->reg_bus_path) ++ icc_set_bw(msm_mdss->reg_bus_path, 0, 0); ++ + return 0; + } + +@@ -384,6 +393,8 @@ static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5 + if (!msm_mdss) + return ERR_PTR(-ENOMEM); + ++ msm_mdss->mdss_data = of_device_get_match_data(&pdev->dev); ++ + msm_mdss->mmio = devm_platform_ioremap_resource_byname(pdev, is_mdp5 ? 
"mdss_phys" : "mdss"); + if (IS_ERR(msm_mdss->mmio)) + return ERR_CAST(msm_mdss->mmio); +@@ -391,9 +402,6 @@ static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5 + dev_dbg(&pdev->dev, "mapped mdss address space @%pK\n", msm_mdss->mmio); + + ret = msm_mdss_parse_data_bus_icc_path(&pdev->dev, msm_mdss); +- if (ret) +- return ERR_PTR(ret); +- ret = devm_add_action_or_reset(&pdev->dev, msm_mdss_put_icc_path, msm_mdss); + if (ret) + return ERR_PTR(ret); + +@@ -477,8 +485,6 @@ static int mdss_probe(struct platform_device *pdev) + if (IS_ERR(mdss)) + return PTR_ERR(mdss); + +- mdss->mdss_data = of_device_get_match_data(&pdev->dev); +- + platform_set_drvdata(pdev, mdss); + + /* +@@ -512,18 +518,21 @@ static const struct msm_mdss_data msm8998_data = { + .ubwc_enc_version = UBWC_1_0, + .ubwc_dec_version = UBWC_1_0, + .highest_bank_bit = 2, ++ .reg_bus_bw = 76800, + }; + + static const struct msm_mdss_data qcm2290_data = { + /* no UBWC */ + .highest_bank_bit = 0x2, ++ .reg_bus_bw = 76800, + }; + + static const struct msm_mdss_data sc7180_data = { + .ubwc_enc_version = UBWC_2_0, + .ubwc_dec_version = UBWC_2_0, + .ubwc_static = 0x1e, +- .highest_bank_bit = 0x3, ++ .highest_bank_bit = 0x1, ++ .reg_bus_bw = 76800, + }; + + static const struct msm_mdss_data sc7280_data = { +@@ -533,6 +542,7 @@ static const struct msm_mdss_data sc7280_data = { + .ubwc_static = 1, + .highest_bank_bit = 1, + .macrotile_mode = 1, ++ .reg_bus_bw = 74000, + }; + + static const struct msm_mdss_data sc8180x_data = { +@@ -540,6 +550,7 @@ static const struct msm_mdss_data sc8180x_data = { + .ubwc_dec_version = UBWC_3_0, + .highest_bank_bit = 3, + .macrotile_mode = 1, ++ .reg_bus_bw = 76800, + }; + + static const struct msm_mdss_data sc8280xp_data = { +@@ -549,12 +560,14 @@ static const struct msm_mdss_data sc8280xp_data = { + .ubwc_static = 1, + .highest_bank_bit = 2, + .macrotile_mode = 1, ++ .reg_bus_bw = 76800, + }; + + static const struct msm_mdss_data sdm845_data = { + .ubwc_enc_version = UBWC_2_0, + .ubwc_dec_version = UBWC_2_0, + .highest_bank_bit = 2, ++ .reg_bus_bw = 76800, + }; + + static const struct msm_mdss_data sm6350_data = { +@@ -563,12 +576,14 @@ static const struct msm_mdss_data sm6350_data = { + .ubwc_swizzle = 6, + .ubwc_static = 0x1e, + .highest_bank_bit = 1, ++ .reg_bus_bw = 76800, + }; + + static const struct msm_mdss_data sm8150_data = { + .ubwc_enc_version = UBWC_3_0, + .ubwc_dec_version = UBWC_3_0, + .highest_bank_bit = 2, ++ .reg_bus_bw = 76800, + }; + + static const struct msm_mdss_data sm6115_data = { +@@ -577,6 +592,7 @@ static const struct msm_mdss_data sm6115_data = { + .ubwc_swizzle = 7, + .ubwc_static = 0x11f, + .highest_bank_bit = 0x1, ++ .reg_bus_bw = 76800, + }; + + static const struct msm_mdss_data sm6125_data = { +@@ -584,6 +600,7 @@ static const struct msm_mdss_data sm6125_data = { + .ubwc_dec_version = UBWC_3_0, + .ubwc_swizzle = 1, + .highest_bank_bit = 1, ++ .reg_bus_bw = 76800, + }; + + static const struct msm_mdss_data sm8250_data = { +@@ -594,6 +611,18 @@ static const struct msm_mdss_data sm8250_data = { + /* TODO: highest_bank_bit = 2 for LP_DDR4 */ + .highest_bank_bit = 3, + .macrotile_mode = 1, ++ .reg_bus_bw = 76800, ++}; ++ ++static const struct msm_mdss_data sm8350_data = { ++ .ubwc_enc_version = UBWC_4_0, ++ .ubwc_dec_version = UBWC_4_0, ++ .ubwc_swizzle = 6, ++ .ubwc_static = 1, ++ /* TODO: highest_bank_bit = 2 for LP_DDR4 */ ++ .highest_bank_bit = 3, ++ .macrotile_mode = 1, ++ .reg_bus_bw = 74000, + }; + + static const struct msm_mdss_data 
sm8550_data = { +@@ -604,6 +633,7 @@ static const struct msm_mdss_data sm8550_data = { + /* TODO: highest_bank_bit = 2 for LP_DDR4 */ + .highest_bank_bit = 3, + .macrotile_mode = 1, ++ .reg_bus_bw = 57000, + }; + static const struct of_device_id mdss_dt_match[] = { + { .compatible = "qcom,mdss" }, +@@ -620,8 +650,8 @@ static const struct of_device_id mdss_dt_match[] = { + { .compatible = "qcom,sm6375-mdss", .data = &sm6350_data }, + { .compatible = "qcom,sm8150-mdss", .data = &sm8150_data }, + { .compatible = "qcom,sm8250-mdss", .data = &sm8250_data }, +- { .compatible = "qcom,sm8350-mdss", .data = &sm8250_data }, +- { .compatible = "qcom,sm8450-mdss", .data = &sm8250_data }, ++ { .compatible = "qcom,sm8350-mdss", .data = &sm8350_data }, ++ { .compatible = "qcom,sm8450-mdss", .data = &sm8350_data }, + { .compatible = "qcom,sm8550-mdss", .data = &sm8550_data }, + {} + }; +diff --git a/drivers/gpu/drm/msm/msm_mdss.h b/drivers/gpu/drm/msm/msm_mdss.h +index 02bbab42adbc0..3afef4b1786d2 100644 +--- a/drivers/gpu/drm/msm/msm_mdss.h ++++ b/drivers/gpu/drm/msm/msm_mdss.h +@@ -14,6 +14,7 @@ struct msm_mdss_data { + u32 ubwc_static; + u32 highest_bank_bit; + u32 macrotile_mode; ++ u32 reg_bus_bw; + }; + + #define UBWC_1_0 0x10000000 +diff --git a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c +index 91fb494d40093..afc8d4e3e16f4 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c ++++ b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c +@@ -187,7 +187,8 @@ nvkm_firmware_dtor(struct nvkm_firmware *fw) + break; + case NVKM_FIRMWARE_IMG_DMA: + nvkm_memory_unref(&memory); +- dma_free_coherent(fw->device->dev, sg_dma_len(&fw->mem.sgl), fw->img, fw->phys); ++ dma_free_noncoherent(fw->device->dev, sg_dma_len(&fw->mem.sgl), ++ fw->img, fw->phys, DMA_TO_DEVICE); + break; + default: + WARN_ON(1); +@@ -212,10 +213,12 @@ nvkm_firmware_ctor(const struct nvkm_firmware_func *func, const char *name, + break; + case NVKM_FIRMWARE_IMG_DMA: { + dma_addr_t addr; +- + len = ALIGN(fw->len, PAGE_SIZE); + +- fw->img = dma_alloc_coherent(fw->device->dev, len, &addr, GFP_KERNEL); ++ fw->img = dma_alloc_noncoherent(fw->device->dev, ++ len, &addr, ++ DMA_TO_DEVICE, ++ GFP_KERNEL); + if (fw->img) { + memcpy(fw->img, src, fw->len); + fw->phys = addr; +diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c b/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c +index 80a480b121746..a1c8545f1249a 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c ++++ b/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c +@@ -89,6 +89,12 @@ nvkm_falcon_fw_boot(struct nvkm_falcon_fw *fw, struct nvkm_subdev *user, + nvkm_falcon_fw_dtor_sigs(fw); + } + ++ /* after last write to the img, sync dma mappings */ ++ dma_sync_single_for_device(fw->fw.device->dev, ++ fw->fw.phys, ++ sg_dma_len(&fw->fw.mem.sgl), ++ DMA_TO_DEVICE); ++ + FLCNFW_DBG(fw, "resetting"); + fw->func->reset(fw); + +diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36523.c b/drivers/gpu/drm/panel/panel-novatek-nt36523.c +index c4a804c5d6aac..aab8937595a2a 100644 +--- a/drivers/gpu/drm/panel/panel-novatek-nt36523.c ++++ b/drivers/gpu/drm/panel/panel-novatek-nt36523.c +@@ -935,8 +935,7 @@ static int j606f_boe_init_sequence(struct panel_info *pinfo) + + static const struct drm_display_mode elish_boe_modes[] = { + { +- /* There is only one 120 Hz timing, but it doesn't work perfectly, 104 Hz preferred */ +- .clock = (1600 + 60 + 8 + 60) * (2560 + 26 + 4 + 168) * 104 / 1000, ++ .clock = (1600 + 60 + 8 + 60) * (2560 + 26 + 4 + 168) * 120 / 1000, + .hdisplay = 1600, 
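Editor's sketch (not part of the patch): the nvkm_firmware hunks above trade coherent DMA allocations for noncoherent ones plus an explicit sync before the firmware is booted. A minimal sketch of that pattern with a hypothetical upload helper:

        #include <linux/dma-mapping.h>
        #include <linux/string.h>

        static void *example_upload(struct device *dev, const void *src,
                                    size_t len, dma_addr_t *addr)
        {
                void *img = dma_alloc_noncoherent(dev, len, addr,
                                                  DMA_TO_DEVICE, GFP_KERNEL);

                if (!img)
                        return NULL;

                memcpy(img, src, len);
                /* flush CPU writes so the device sees the final image */
                dma_sync_single_for_device(dev, *addr, len, DMA_TO_DEVICE);
                return img;
        }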
+ .hsync_start = 1600 + 60, + .hsync_end = 1600 + 60 + 8, +@@ -950,8 +949,7 @@ static const struct drm_display_mode elish_boe_modes[] = { + + static const struct drm_display_mode elish_csot_modes[] = { + { +- /* There is only one 120 Hz timing, but it doesn't work perfectly, 104 Hz preferred */ +- .clock = (1600 + 200 + 40 + 52) * (2560 + 26 + 4 + 168) * 104 / 1000, ++ .clock = (1600 + 200 + 40 + 52) * (2560 + 26 + 4 + 168) * 120 / 1000, + .hdisplay = 1600, + .hsync_start = 1600 + 200, + .hsync_end = 1600 + 200 + 40, +diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +index f2a956f973613..d1de12e850e74 100644 +--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c ++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +@@ -1260,6 +1260,11 @@ static void vop2_plane_atomic_update(struct drm_plane *plane, + vop2_win_write(win, VOP2_WIN_AFBC_ROTATE_270, rotate_270); + vop2_win_write(win, VOP2_WIN_AFBC_ROTATE_90, rotate_90); + } else { ++ if (vop2_cluster_window(win)) { ++ vop2_win_write(win, VOP2_WIN_AFBC_ENABLE, 0); ++ vop2_win_write(win, VOP2_WIN_AFBC_TRANSFORM_OFFSET, 0); ++ } ++ + vop2_win_write(win, VOP2_WIN_YRGB_VIR, DIV_ROUND_UP(fb->pitches[0], 4)); + } + +diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c +index a4023163493dc..a825fbbc01af6 100644 +--- a/drivers/gpu/drm/tegra/gem.c ++++ b/drivers/gpu/drm/tegra/gem.c +@@ -177,7 +177,7 @@ static void tegra_bo_unpin(struct host1x_bo_mapping *map) + static void *tegra_bo_mmap(struct host1x_bo *bo) + { + struct tegra_bo *obj = host1x_to_tegra_bo(bo); +- struct iosys_map map; ++ struct iosys_map map = { 0 }; + int ret; + + if (obj->vaddr) { +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c +index d2fe14ce423e2..5db26a8af7728 100644 +--- a/drivers/hid/wacom_wac.c ++++ b/drivers/hid/wacom_wac.c +@@ -1924,12 +1924,14 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage, + int fmax = field->logical_maximum; + unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid); + int resolution_code = code; +- int resolution = hidinput_calc_abs_res(field, resolution_code); ++ int resolution; + + if (equivalent_usage == HID_DG_TWIST) { + resolution_code = ABS_RZ; + } + ++ resolution = hidinput_calc_abs_res(field, resolution_code); ++ + if (equivalent_usage == HID_GD_X) { + fmin += features->offset_left; + fmax -= features->offset_right; +diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c +index 589bcd07ce7f7..b8548105cd67a 100644 +--- a/drivers/hwmon/ltc2992.c ++++ b/drivers/hwmon/ltc2992.c +@@ -875,8 +875,14 @@ static int ltc2992_parse_dt(struct ltc2992_state *st) + } + + ret = fwnode_property_read_u32(child, "shunt-resistor-micro-ohms", &val); +- if (!ret) ++ if (!ret) { ++ if (!val) { ++ fwnode_handle_put(child); ++ return dev_err_probe(&st->client->dev, -EINVAL, ++ "shunt resistor value cannot be zero\n"); ++ } + st->r_sense_uohm[addr] = val; ++ } + } + + return 0; +diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c +index a4adc8bd531ff..534a6072036c9 100644 +--- a/drivers/hwmon/pc87360.c ++++ b/drivers/hwmon/pc87360.c +@@ -323,7 +323,11 @@ static struct pc87360_data *pc87360_update_device(struct device *dev) + } + + /* Voltages */ +- for (i = 0; i < data->innr; i++) { ++ /* ++ * The min() below does not have any practical meaning and is ++ * only needed to silence a warning observed with gcc 12+. 
++ */ ++ for (i = 0; i < min(data->innr, ARRAY_SIZE(data->in)); i++) { + data->in_status[i] = pc87360_read_value(data, LD_IN, i, + PC87365_REG_IN_STATUS); + /* Clear bits */ +diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c +index b17411e97be68..d665c1c7c3274 100644 +--- a/drivers/i2c/busses/i2c-qcom-geni.c ++++ b/drivers/i2c/busses/i2c-qcom-geni.c +@@ -987,8 +987,10 @@ static int __maybe_unused geni_i2c_runtime_resume(struct device *dev) + return ret; + + ret = clk_prepare_enable(gi2c->core_clk); +- if (ret) ++ if (ret) { ++ geni_icc_disable(&gi2c->se); + return ret; ++ } + + ret = geni_se_resources_on(&gi2c->se); + if (ret) { +diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c +index f0ee8871d5ae1..e43ff483c56ec 100644 +--- a/drivers/i2c/busses/i2c-riic.c ++++ b/drivers/i2c/busses/i2c-riic.c +@@ -313,7 +313,7 @@ static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t) + * frequency with only 62 clock ticks max (31 high, 31 low). + * Aim for a duty of 60% LOW, 40% HIGH. + */ +- total_ticks = DIV_ROUND_UP(rate, t->bus_freq_hz); ++ total_ticks = DIV_ROUND_UP(rate, t->bus_freq_hz ?: 1); + + for (cks = 0; cks < 7; cks++) { + /* +diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c +index 0d3c9a041b561..887d55ae40969 100644 +--- a/drivers/i2c/busses/i2c-stm32f7.c ++++ b/drivers/i2c/busses/i2c-stm32f7.c +@@ -357,6 +357,7 @@ struct stm32f7_i2c_dev { + u32 dnf_dt; + u32 dnf; + struct stm32f7_i2c_alert *alert; ++ bool atomic; + }; + + /* +@@ -915,7 +916,8 @@ static void stm32f7_i2c_xfer_msg(struct stm32f7_i2c_dev *i2c_dev, + + /* Configure DMA or enable RX/TX interrupt */ + i2c_dev->use_dma = false; +- if (i2c_dev->dma && f7_msg->count >= STM32F7_I2C_DMA_LEN_MIN) { ++ if (i2c_dev->dma && f7_msg->count >= STM32F7_I2C_DMA_LEN_MIN ++ && !i2c_dev->atomic) { + ret = stm32_i2c_prep_dma_xfer(i2c_dev->dev, i2c_dev->dma, + msg->flags & I2C_M_RD, + f7_msg->count, f7_msg->buf, +@@ -939,6 +941,9 @@ static void stm32f7_i2c_xfer_msg(struct stm32f7_i2c_dev *i2c_dev, + cr1 |= STM32F7_I2C_CR1_TXDMAEN; + } + ++ if (i2c_dev->atomic) ++ cr1 &= ~STM32F7_I2C_ALL_IRQ_MASK; /* Disable all interrupts */ ++ + /* Configure Start/Repeated Start */ + cr2 |= STM32F7_I2C_CR2_START; + +@@ -1673,7 +1678,22 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data) + return IRQ_HANDLED; + } + +-static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap, ++static int stm32f7_i2c_wait_polling(struct stm32f7_i2c_dev *i2c_dev) ++{ ++ ktime_t timeout = ktime_add_ms(ktime_get(), i2c_dev->adap.timeout); ++ ++ while (ktime_compare(ktime_get(), timeout) < 0) { ++ udelay(5); ++ stm32f7_i2c_isr_event(0, i2c_dev); ++ ++ if (completion_done(&i2c_dev->complete)) ++ return 1; ++ } ++ ++ return 0; ++} ++ ++static int stm32f7_i2c_xfer_core(struct i2c_adapter *i2c_adap, + struct i2c_msg msgs[], int num) + { + struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap); +@@ -1697,8 +1717,12 @@ static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap, + + stm32f7_i2c_xfer_msg(i2c_dev, msgs); + +- time_left = wait_for_completion_timeout(&i2c_dev->complete, +- i2c_dev->adap.timeout); ++ if (!i2c_dev->atomic) ++ time_left = wait_for_completion_timeout(&i2c_dev->complete, ++ i2c_dev->adap.timeout); ++ else ++ time_left = stm32f7_i2c_wait_polling(i2c_dev); ++ + ret = f7_msg->result; + if (ret) { + if (i2c_dev->use_dma) +@@ -1730,6 +1754,24 @@ static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap, + return (ret < 0) ? 
ret : num; + } + ++static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap, ++ struct i2c_msg msgs[], int num) ++{ ++ struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap); ++ ++ i2c_dev->atomic = false; ++ return stm32f7_i2c_xfer_core(i2c_adap, msgs, num); ++} ++ ++static int stm32f7_i2c_xfer_atomic(struct i2c_adapter *i2c_adap, ++ struct i2c_msg msgs[], int num) ++{ ++ struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap); ++ ++ i2c_dev->atomic = true; ++ return stm32f7_i2c_xfer_core(i2c_adap, msgs, num); ++} ++ + static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, + unsigned short flags, char read_write, + u8 command, int size, +@@ -2098,6 +2140,7 @@ static u32 stm32f7_i2c_func(struct i2c_adapter *adap) + + static const struct i2c_algorithm stm32f7_i2c_algo = { + .master_xfer = stm32f7_i2c_xfer, ++ .master_xfer_atomic = stm32f7_i2c_xfer_atomic, + .smbus_xfer = stm32f7_i2c_smbus_xfer, + .functionality = stm32f7_i2c_func, + .reg_slave = stm32f7_i2c_reg_slave, +diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c +index 920d5a8cbf4c7..91be04b534fe6 100644 +--- a/drivers/i2c/busses/i2c-tegra.c ++++ b/drivers/i2c/busses/i2c-tegra.c +@@ -1804,9 +1804,9 @@ static int tegra_i2c_probe(struct platform_device *pdev) + * domain. + * + * VI I2C device shouldn't be marked as IRQ-safe because VI I2C won't +- * be used for atomic transfers. ++ * be used for atomic transfers. ACPI device is not IRQ safe also. + */ +- if (!IS_VI(i2c_dev)) ++ if (!IS_VI(i2c_dev) && !has_acpi_companion(i2c_dev->dev)) + pm_runtime_irq_safe(i2c_dev->dev); + + pm_runtime_enable(i2c_dev->dev); +diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c +index 71b5dbe45c45c..337c95d43f3f6 100644 +--- a/drivers/i3c/master/mipi-i3c-hci/dma.c ++++ b/drivers/i3c/master/mipi-i3c-hci/dma.c +@@ -345,6 +345,8 @@ static void hci_dma_unmap_xfer(struct i3c_hci *hci, + + for (i = 0; i < n; i++) { + xfer = xfer_list + i; ++ if (!xfer->data) ++ continue; + dma_unmap_single(&hci->master.dev, + xfer->data_dma, xfer->data_len, + xfer->rnw ? DMA_FROM_DEVICE : DMA_TO_DEVICE); +@@ -450,10 +452,9 @@ static bool hci_dma_dequeue_xfer(struct i3c_hci *hci, + /* + * We're deep in it if ever this condition is ever met. + * Hardware might still be writing to memory, etc. +- * Better suspend the world than risking silent corruption. 
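Editor's sketch (not part of the patch): stm32f7_i2c_wait_polling() above is the heart of the new atomic path; with interrupts masked, completion is detected by polling the hardware by hand. A simplified sketch of the idea; the device structure and ISR name are hypothetical.

        #include <linux/completion.h>
        #include <linux/ktime.h>
        #include <linux/delay.h>

        static int example_wait_polling(struct example_i2c *i2c,
                                        unsigned int timeout_ms)
        {
                ktime_t timeout = ktime_add_ms(ktime_get(), timeout_ms);

                while (ktime_compare(ktime_get(), timeout) < 0) {
                        udelay(5);
                        example_i2c_isr_event(0, i2c); /* drive the state machine */

                        if (completion_done(&i2c->complete))
                                return 1;       /* transfer finished */
                }

                return 0;       /* timed out */
        }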
+ */ + dev_crit(&hci->master.dev, "unable to abort the ring\n"); +- BUG(); ++ WARN_ON(1); + } + + for (i = 0; i < n; i++) { +diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c +index 0814291a04120..9b542f7c6c115 100644 +--- a/drivers/infiniband/hw/hfi1/chip.c ++++ b/drivers/infiniband/hw/hfi1/chip.c +@@ -13185,15 +13185,16 @@ static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits, + { + u64 reg; + u16 idx = src / BITS_PER_REGISTER; ++ unsigned long flags; + +- spin_lock(&dd->irq_src_lock); ++ spin_lock_irqsave(&dd->irq_src_lock, flags); + reg = read_csr(dd, CCE_INT_MASK + (8 * idx)); + if (set) + reg |= bits; + else + reg &= ~bits; + write_csr(dd, CCE_INT_MASK + (8 * idx), reg); +- spin_unlock(&dd->irq_src_lock); ++ spin_unlock_irqrestore(&dd->irq_src_lock, flags); + } + + /** +diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c +index 3696f367ff515..d80edfffd2e49 100644 +--- a/drivers/infiniband/ulp/rtrs/rtrs.c ++++ b/drivers/infiniband/ulp/rtrs/rtrs.c +@@ -255,7 +255,7 @@ static int create_cq(struct rtrs_con *con, int cq_vector, int nr_cqe, + static int create_qp(struct rtrs_con *con, struct ib_pd *pd, + u32 max_send_wr, u32 max_recv_wr, u32 max_sge) + { +- struct ib_qp_init_attr init_attr = {NULL}; ++ struct ib_qp_init_attr init_attr = {}; + struct rdma_cm_id *cm_id = con->cm_id; + int ret; + +diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c +index 14b53dac1253b..6b04a674f832a 100644 +--- a/drivers/input/input-mt.c ++++ b/drivers/input/input-mt.c +@@ -46,6 +46,9 @@ int input_mt_init_slots(struct input_dev *dev, unsigned int num_slots, + return 0; + if (mt) + return mt->num_slots != num_slots ? -EINVAL : 0; ++ /* Arbitrary limit for avoiding too large memory allocation. */ ++ if (num_slots > 1024) ++ return -EINVAL; + + mt = kzalloc(struct_size(mt, slots, num_slots), GFP_KERNEL); + if (!mt) +diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h +index 5b50475ec4140..e9eb9554dd7bd 100644 +--- a/drivers/input/serio/i8042-acpipnpio.h ++++ b/drivers/input/serio/i8042-acpipnpio.h +@@ -83,6 +83,7 @@ static inline void i8042_write_command(int val) + #define SERIO_QUIRK_KBDRESET BIT(12) + #define SERIO_QUIRK_DRITEK BIT(13) + #define SERIO_QUIRK_NOPNP BIT(14) ++#define SERIO_QUIRK_FORCENORESTORE BIT(15) + + /* Quirk table for different mainboards. Options similar or identical to i8042 + * module parameters. +@@ -1149,18 +1150,10 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + }, + { +- /* +- * Setting SERIO_QUIRK_NOMUX or SERIO_QUIRK_RESET_ALWAYS makes +- * the keyboard very laggy for ~5 seconds after boot and +- * sometimes also after resume. +- * However both are required for the keyboard to not fail +- * completely sometimes after boot or resume. 
+- */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "N150CU"), + }, +- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | +- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) ++ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) + }, + { + .matches = { +@@ -1685,6 +1678,8 @@ static void __init i8042_check_quirks(void) + if (quirks & SERIO_QUIRK_NOPNP) + i8042_nopnp = true; + #endif ++ if (quirks & SERIO_QUIRK_FORCENORESTORE) ++ i8042_forcenorestore = true; + } + #else + static inline void i8042_check_quirks(void) {} +@@ -1718,7 +1713,7 @@ static int __init i8042_platform_init(void) + + i8042_check_quirks(); + +- pr_debug("Active quirks (empty means none):%s%s%s%s%s%s%s%s%s%s%s%s%s\n", ++ pr_debug("Active quirks (empty means none):%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", + i8042_nokbd ? " nokbd" : "", + i8042_noaux ? " noaux" : "", + i8042_nomux ? " nomux" : "", +@@ -1738,10 +1733,11 @@ static int __init i8042_platform_init(void) + "", + #endif + #ifdef CONFIG_PNP +- i8042_nopnp ? " nopnp" : ""); ++ i8042_nopnp ? " nopnp" : "", + #else +- ""); ++ "", + #endif ++ i8042_forcenorestore ? " forcenorestore" : ""); + + retval = i8042_pnp_init(); + if (retval) +diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c +index 6dac7c1853a54..29340f8095bb2 100644 +--- a/drivers/input/serio/i8042.c ++++ b/drivers/input/serio/i8042.c +@@ -115,6 +115,10 @@ module_param_named(nopnp, i8042_nopnp, bool, 0); + MODULE_PARM_DESC(nopnp, "Do not use PNP to detect controller settings"); + #endif + ++static bool i8042_forcenorestore; ++module_param_named(forcenorestore, i8042_forcenorestore, bool, 0); ++MODULE_PARM_DESC(forcenorestore, "Force no restore on s3 resume, copying s2idle behaviour"); ++ + #define DEBUG + #ifdef DEBUG + static bool i8042_debug; +@@ -1232,7 +1236,7 @@ static int i8042_pm_suspend(struct device *dev) + { + int i; + +- if (pm_suspend_via_firmware()) ++ if (!i8042_forcenorestore && pm_suspend_via_firmware()) + i8042_controller_reset(true); + + /* Set up serio interrupts for system wakeup. */ +@@ -1248,7 +1252,7 @@ static int i8042_pm_suspend(struct device *dev) + + static int i8042_pm_resume_noirq(struct device *dev) + { +- if (!pm_resume_via_firmware()) ++ if (i8042_forcenorestore || !pm_resume_via_firmware()) + i8042_interrupt(0, NULL); + + return 0; +@@ -1271,7 +1275,7 @@ static int i8042_pm_resume(struct device *dev) + * not restore the controller state to whatever it had been at boot + * time, so we do not need to do anything. 
+ */ +- if (!pm_suspend_via_firmware()) ++ if (i8042_forcenorestore || !pm_suspend_via_firmware()) + return 0; + + /* +diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c +index 40503376d80cc..6e6cb19c81f59 100644 +--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c ++++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c +@@ -252,6 +252,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = { + { .compatible = "qcom,sc7280-mss-pil" }, + { .compatible = "qcom,sc8180x-mdss" }, + { .compatible = "qcom,sc8280xp-mdss" }, ++ { .compatible = "qcom,sdm670-mdss" }, + { .compatible = "qcom,sdm845-mdss" }, + { .compatible = "qcom,sdm845-mss-pil" }, + { .compatible = "qcom,sm6350-mdss" }, +diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c +index 2da969fc89900..f5eb97726d1bb 100644 +--- a/drivers/iommu/dma-iommu.c ++++ b/drivers/iommu/dma-iommu.c +@@ -1614,7 +1614,7 @@ static const struct dma_map_ops iommu_dma_ops = { + .flags = DMA_F_PCI_P2PDMA_SUPPORTED, + .alloc = iommu_dma_alloc, + .free = iommu_dma_free, +- .alloc_pages = dma_common_alloc_pages, ++ .alloc_pages_op = dma_common_alloc_pages, + .free_pages = dma_common_free_pages, + .alloc_noncontiguous = iommu_dma_alloc_noncontiguous, + .free_noncontiguous = iommu_dma_free_noncontiguous, +diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c +index c7d6e6987166f..350abbb36e04b 100644 +--- a/drivers/irqchip/irq-gic-v3-its.c ++++ b/drivers/irqchip/irq-gic-v3-its.c +@@ -4501,8 +4501,6 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq + struct page *vprop_page; + int base, nr_ids, i, err = 0; + +- BUG_ON(!vm); +- + bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids); + if (!bitmap) + return -ENOMEM; +diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c +index 02ab6a944539f..ea4b921e5e158 100644 +--- a/drivers/irqchip/irq-renesas-rzg2l.c ++++ b/drivers/irqchip/irq-renesas-rzg2l.c +@@ -132,7 +132,7 @@ static void rzg2l_irqc_irq_disable(struct irq_data *d) + + raw_spin_lock(&priv->lock); + reg = readl_relaxed(priv->base + TSSR(tssr_index)); +- reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset)); ++ reg &= ~(TIEN << TSSEL_SHIFT(tssr_offset)); + writel_relaxed(reg, priv->base + TSSR(tssr_index)); + raw_spin_unlock(&priv->lock); + } +@@ -144,7 +144,6 @@ static void rzg2l_irqc_irq_enable(struct irq_data *d) + unsigned int hw_irq = irqd_to_hwirq(d); + + if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) { +- unsigned long tint = (uintptr_t)irq_data_get_irq_chip_data(d); + struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); + u32 offset = hw_irq - IRQC_TINT_START; + u32 tssr_offset = TSSR_OFFSET(offset); +@@ -153,7 +152,7 @@ static void rzg2l_irqc_irq_enable(struct irq_data *d) + + raw_spin_lock(&priv->lock); + reg = readl_relaxed(priv->base + TSSR(tssr_index)); +- reg |= (TIEN | tint) << TSSEL_SHIFT(tssr_offset); ++ reg |= TIEN << TSSEL_SHIFT(tssr_offset); + writel_relaxed(reg, priv->base + TSSR(tssr_index)); + raw_spin_unlock(&priv->lock); + } +diff --git a/drivers/md/dm-clone-metadata.c b/drivers/md/dm-clone-metadata.c +index c43d55672bce0..47c1fa7aad8b5 100644 +--- a/drivers/md/dm-clone-metadata.c ++++ b/drivers/md/dm-clone-metadata.c +@@ -465,11 +465,6 @@ static void __destroy_persistent_data_structures(struct dm_clone_metadata *cmd) + + /*---------------------------------------------------------------------------*/ + +-static size_t bitmap_size(unsigned long 
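Editor's sketch (not part of the patch): the rzg2l hunks above fix enable/disable to toggle only the interrupt-enable bit, leaving the source-select field written at setup time intact. A sketch of the corrected read-modify-write; the structure, register offsets, and bit names are hypothetical.

        #include <linux/io.h>
        #include <linux/bits.h>
        #include <linux/spinlock.h>

        #define EXAMPLE_TSSR(n) (0x30 + ((n) * 4))  /* hypothetical offset */
        #define EXAMPLE_TIEN    BIT(7)              /* hypothetical enable bit */

        struct example_priv {
                void __iomem *base;
                raw_spinlock_t lock;
        };

        static void example_tint_set(struct example_priv *priv, u32 index,
                                     u32 shift, bool enable)
        {
                u32 reg;

                raw_spin_lock(&priv->lock);
                reg = readl_relaxed(priv->base + EXAMPLE_TSSR(index));
                if (enable)
                        reg |= EXAMPLE_TIEN << shift;    /* only the enable bit */
                else
                        reg &= ~(EXAMPLE_TIEN << shift); /* source select survives */
                writel_relaxed(reg, priv->base + EXAMPLE_TSSR(index));
                raw_spin_unlock(&priv->lock);
        }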
nr_bits) +-{ +- return BITS_TO_LONGS(nr_bits) * sizeof(long); +-} +- + static int __dirty_map_init(struct dirty_map *dmap, unsigned long nr_words, + unsigned long nr_regions) + { +diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c +index 3b8b2e886cf67..5bb76aab7755f 100644 +--- a/drivers/md/dm-ioctl.c ++++ b/drivers/md/dm-ioctl.c +@@ -1181,8 +1181,26 @@ static int do_resume(struct dm_ioctl *param) + suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG; + if (param->flags & DM_NOFLUSH_FLAG) + suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG; +- if (!dm_suspended_md(md)) +- dm_suspend(md, suspend_flags); ++ if (!dm_suspended_md(md)) { ++ r = dm_suspend(md, suspend_flags); ++ if (r) { ++ down_write(&_hash_lock); ++ hc = dm_get_mdptr(md); ++ if (hc && !hc->new_map) { ++ hc->new_map = new_map; ++ new_map = NULL; ++ } else { ++ r = -ENXIO; ++ } ++ up_write(&_hash_lock); ++ if (new_map) { ++ dm_sync_table(md); ++ dm_table_destroy(new_map); ++ } ++ dm_put(md); ++ return r; ++ } ++ } + + old_size = dm_get_size(md); + old_map = dm_swap_table(md, new_map); +diff --git a/drivers/md/dm.c b/drivers/md/dm.c +index f945ee453457b..8ec0a263744a5 100644 +--- a/drivers/md/dm.c ++++ b/drivers/md/dm.c +@@ -2531,7 +2531,7 @@ static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int ta + break; + + if (signal_pending_state(task_state, current)) { +- r = -EINTR; ++ r = -ERESTARTSYS; + break; + } + +@@ -2556,7 +2556,7 @@ static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_st + break; + + if (signal_pending_state(task_state, current)) { +- r = -EINTR; ++ r = -ERESTARTSYS; + break; + } + +diff --git a/drivers/md/md.c b/drivers/md/md.c +index 35b003b83ef1b..d1f6770c5cc09 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -7652,11 +7652,6 @@ static int md_ioctl(struct block_device *bdev, blk_mode_t mode, + + mddev = bdev->bd_disk->private_data; + +- if (!mddev) { +- BUG(); +- goto out; +- } +- + /* Some actions do not requires the mutex */ + switch (cmd) { + case GET_ARRAY_INFO: +diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c +index 04698fd03e606..d48c4fafc7798 100644 +--- a/drivers/md/persistent-data/dm-space-map-metadata.c ++++ b/drivers/md/persistent-data/dm-space-map-metadata.c +@@ -277,7 +277,7 @@ static void sm_metadata_destroy(struct dm_space_map *sm) + { + struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); + +- kfree(smm); ++ kvfree(smm); + } + + static int sm_metadata_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count) +@@ -772,7 +772,7 @@ struct dm_space_map *dm_sm_metadata_init(void) + { + struct sm_metadata *smm; + +- smm = kmalloc(sizeof(*smm), GFP_KERNEL); ++ smm = kvmalloc(sizeof(*smm), GFP_KERNEL); + if (!smm) + return ERR_PTR(-ENOMEM); + +diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c +index 518b7cfa78b9d..889bba60d6ff7 100644 +--- a/drivers/md/raid5-cache.c ++++ b/drivers/md/raid5-cache.c +@@ -327,8 +327,9 @@ void r5l_wake_reclaim(struct r5l_log *log, sector_t space); + void r5c_check_stripe_cache_usage(struct r5conf *conf) + { + int total_cached; ++ struct r5l_log *log = READ_ONCE(conf->log); + +- if (!r5c_is_writeback(conf->log)) ++ if (!r5c_is_writeback(log)) + return; + + total_cached = atomic_read(&conf->r5c_cached_partial_stripes) + +@@ -344,7 +345,7 @@ void r5c_check_stripe_cache_usage(struct r5conf *conf) + */ + if (total_cached > conf->min_nr_stripes * 1 / 2 || + atomic_read(&conf->empty_inactive_list_nr) > 0) +- r5l_wake_reclaim(conf->log, 0); 
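Editor's sketch (not part of the patch): dm-space-map-metadata above switches the allocation to kvmalloc(), so the destroy path must switch to kvfree() in the same commit. A small sketch of the pairing rule with a hypothetical object:

        #include <linux/slab.h>

        struct example_obj {
                char payload[1 << 16];  /* big enough to fall back to vmalloc */
        };

        static struct example_obj *example_create(void)
        {
                /* kvmalloc() tries kmalloc first, then vmalloc;
                 * kvfree() transparently handles either case */
                return kvmalloc(sizeof(struct example_obj), GFP_KERNEL);
        }

        static void example_destroy(struct example_obj *obj)
        {
                kvfree(obj);
        }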
++ r5l_wake_reclaim(log, 0); + } + + /* +@@ -353,7 +354,9 @@ void r5c_check_stripe_cache_usage(struct r5conf *conf) + */ + void r5c_check_cached_full_stripe(struct r5conf *conf) + { +- if (!r5c_is_writeback(conf->log)) ++ struct r5l_log *log = READ_ONCE(conf->log); ++ ++ if (!r5c_is_writeback(log)) + return; + + /* +@@ -363,7 +366,7 @@ void r5c_check_cached_full_stripe(struct r5conf *conf) + if (atomic_read(&conf->r5c_cached_full_stripes) >= + min(R5C_FULL_STRIPE_FLUSH_BATCH(conf), + conf->chunk_sectors >> RAID5_STRIPE_SHIFT(conf))) +- r5l_wake_reclaim(conf->log, 0); ++ r5l_wake_reclaim(log, 0); + } + + /* +@@ -396,7 +399,7 @@ void r5c_check_cached_full_stripe(struct r5conf *conf) + */ + static sector_t r5c_log_required_to_flush_cache(struct r5conf *conf) + { +- struct r5l_log *log = conf->log; ++ struct r5l_log *log = READ_ONCE(conf->log); + + if (!r5c_is_writeback(log)) + return 0; +@@ -449,7 +452,7 @@ static inline void r5c_update_log_state(struct r5l_log *log) + void r5c_make_stripe_write_out(struct stripe_head *sh) + { + struct r5conf *conf = sh->raid_conf; +- struct r5l_log *log = conf->log; ++ struct r5l_log *log = READ_ONCE(conf->log); + + BUG_ON(!r5c_is_writeback(log)); + +@@ -491,7 +494,7 @@ static void r5c_handle_parity_cached(struct stripe_head *sh) + */ + static void r5c_finish_cache_stripe(struct stripe_head *sh) + { +- struct r5l_log *log = sh->raid_conf->log; ++ struct r5l_log *log = READ_ONCE(sh->raid_conf->log); + + if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) { + BUG_ON(test_bit(STRIPE_R5C_CACHING, &sh->state)); +@@ -692,7 +695,7 @@ static void r5c_disable_writeback_async(struct work_struct *work) + + /* wait superblock change before suspend */ + wait_event(mddev->sb_wait, +- conf->log == NULL || ++ !READ_ONCE(conf->log) || + (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) && + (locked = mddev_trylock(mddev)))); + if (locked) { +@@ -1151,7 +1154,7 @@ static void r5l_run_no_space_stripes(struct r5l_log *log) + static sector_t r5c_calculate_new_cp(struct r5conf *conf) + { + struct stripe_head *sh; +- struct r5l_log *log = conf->log; ++ struct r5l_log *log = READ_ONCE(conf->log); + sector_t new_cp; + unsigned long flags; + +@@ -1159,12 +1162,12 @@ static sector_t r5c_calculate_new_cp(struct r5conf *conf) + return log->next_checkpoint; + + spin_lock_irqsave(&log->stripe_in_journal_lock, flags); +- if (list_empty(&conf->log->stripe_in_journal_list)) { ++ if (list_empty(&log->stripe_in_journal_list)) { + /* all stripes flushed */ + spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags); + return log->next_checkpoint; + } +- sh = list_first_entry(&conf->log->stripe_in_journal_list, ++ sh = list_first_entry(&log->stripe_in_journal_list, + struct stripe_head, r5c); + new_cp = sh->log_start; + spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags); +@@ -1399,7 +1402,7 @@ void r5c_flush_cache(struct r5conf *conf, int num) + struct stripe_head *sh, *next; + + lockdep_assert_held(&conf->device_lock); +- if (!conf->log) ++ if (!READ_ONCE(conf->log)) + return; + + count = 0; +@@ -1420,7 +1423,7 @@ void r5c_flush_cache(struct r5conf *conf, int num) + + static void r5c_do_reclaim(struct r5conf *conf) + { +- struct r5l_log *log = conf->log; ++ struct r5l_log *log = READ_ONCE(conf->log); + struct stripe_head *sh; + int count = 0; + unsigned long flags; +@@ -1549,7 +1552,7 @@ static void r5l_reclaim_thread(struct md_thread *thread) + { + struct mddev *mddev = thread->mddev; + struct r5conf *conf = mddev->private; +- struct r5l_log *log = conf->log; ++ struct 
r5l_log *log = READ_ONCE(conf->log); + + if (!log) + return; +@@ -1591,7 +1594,7 @@ void r5l_quiesce(struct r5l_log *log, int quiesce) + + bool r5l_log_disk_error(struct r5conf *conf) + { +- struct r5l_log *log = conf->log; ++ struct r5l_log *log = READ_ONCE(conf->log); + + /* don't allow write if journal disk is missing */ + if (!log) +@@ -2635,7 +2638,7 @@ int r5c_try_caching_write(struct r5conf *conf, + struct stripe_head_state *s, + int disks) + { +- struct r5l_log *log = conf->log; ++ struct r5l_log *log = READ_ONCE(conf->log); + int i; + struct r5dev *dev; + int to_cache = 0; +@@ -2802,7 +2805,7 @@ void r5c_finish_stripe_write_out(struct r5conf *conf, + struct stripe_head *sh, + struct stripe_head_state *s) + { +- struct r5l_log *log = conf->log; ++ struct r5l_log *log = READ_ONCE(conf->log); + int i; + int do_wakeup = 0; + sector_t tree_index; +@@ -2941,7 +2944,7 @@ int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh) + /* check whether this big stripe is in write back cache. */ + bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect) + { +- struct r5l_log *log = conf->log; ++ struct r5l_log *log = READ_ONCE(conf->log); + sector_t tree_index; + void *slot; + +@@ -3049,14 +3052,14 @@ int r5l_start(struct r5l_log *log) + void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev) + { + struct r5conf *conf = mddev->private; +- struct r5l_log *log = conf->log; ++ struct r5l_log *log = READ_ONCE(conf->log); + + if (!log) + return; + + if ((raid5_calc_degraded(conf) > 0 || + test_bit(Journal, &rdev->flags)) && +- conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) ++ log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) + schedule_work(&log->disable_writeback_work); + } + +@@ -3145,7 +3148,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) + spin_lock_init(&log->stripe_in_journal_lock); + atomic_set(&log->stripe_in_journal_count, 0); + +- conf->log = log; ++ WRITE_ONCE(conf->log, log); + + set_bit(MD_HAS_JOURNAL, &conf->mddev->flags); + return 0; +@@ -3173,7 +3176,7 @@ void r5l_exit_log(struct r5conf *conf) + * 'reconfig_mutex' is held by caller, set 'confg->log' to NULL to + * ensure disable_writeback_work wakes up and exits. 
+ */ +- conf->log = NULL; ++ WRITE_ONCE(conf->log, NULL); + wake_up(&conf->mddev->sb_wait); + flush_work(&log->disable_writeback_work); + +diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c +index 9293b058ab997..93d3378a0df4b 100644 +--- a/drivers/media/dvb-core/dvb_frontend.c ++++ b/drivers/media/dvb-core/dvb_frontend.c +@@ -2168,7 +2168,8 @@ static int dvb_frontend_handle_compat_ioctl(struct file *file, unsigned int cmd, + if (!tvps->num || (tvps->num > DTV_IOCTL_MAX_MSGS)) + return -EINVAL; + +- tvp = memdup_user(compat_ptr(tvps->props), tvps->num * sizeof(*tvp)); ++ tvp = memdup_array_user(compat_ptr(tvps->props), ++ tvps->num, sizeof(*tvp)); + if (IS_ERR(tvp)) + return PTR_ERR(tvp); + +@@ -2199,7 +2200,8 @@ static int dvb_frontend_handle_compat_ioctl(struct file *file, unsigned int cmd, + if (!tvps->num || (tvps->num > DTV_IOCTL_MAX_MSGS)) + return -EINVAL; + +- tvp = memdup_user(compat_ptr(tvps->props), tvps->num * sizeof(*tvp)); ++ tvp = memdup_array_user(compat_ptr(tvps->props), ++ tvps->num, sizeof(*tvp)); + if (IS_ERR(tvp)) + return PTR_ERR(tvp); + +@@ -2379,7 +2381,8 @@ static int dvb_get_property(struct dvb_frontend *fe, struct file *file, + if (!tvps->num || tvps->num > DTV_IOCTL_MAX_MSGS) + return -EINVAL; + +- tvp = memdup_user((void __user *)tvps->props, tvps->num * sizeof(*tvp)); ++ tvp = memdup_array_user((void __user *)tvps->props, ++ tvps->num, sizeof(*tvp)); + if (IS_ERR(tvp)) + return PTR_ERR(tvp); + +@@ -2457,7 +2460,8 @@ static int dvb_frontend_handle_ioctl(struct file *file, + if (!tvps->num || (tvps->num > DTV_IOCTL_MAX_MSGS)) + return -EINVAL; + +- tvp = memdup_user((void __user *)tvps->props, tvps->num * sizeof(*tvp)); ++ tvp = memdup_array_user((void __user *)tvps->props, ++ tvps->num, sizeof(*tvp)); + if (IS_ERR(tvp)) + return PTR_ERR(tvp); + +diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c +index 9af2c5596121c..51d7d720ec48b 100644 +--- a/drivers/media/pci/cx23885/cx23885-video.c ++++ b/drivers/media/pci/cx23885/cx23885-video.c +@@ -1354,6 +1354,10 @@ int cx23885_video_register(struct cx23885_dev *dev) + /* register Video device */ + dev->video_dev = cx23885_vdev_init(dev, dev->pci, + &cx23885_video_template, "video"); ++ if (!dev->video_dev) { ++ err = -ENOMEM; ++ goto fail_unreg; ++ } + dev->video_dev->queue = &dev->vb2_vidq; + dev->video_dev->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING | + V4L2_CAP_AUDIO | V4L2_CAP_VIDEO_CAPTURE; +@@ -1382,6 +1386,10 @@ int cx23885_video_register(struct cx23885_dev *dev) + /* register VBI device */ + dev->vbi_dev = cx23885_vdev_init(dev, dev->pci, + &cx23885_vbi_template, "vbi"); ++ if (!dev->vbi_dev) { ++ err = -ENOMEM; ++ goto fail_unreg; ++ } + dev->vbi_dev->queue = &dev->vb2_vbiq; + dev->vbi_dev->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING | + V4L2_CAP_AUDIO | V4L2_CAP_VBI_CAPTURE; +diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c +index 48c9084bb4dba..a1b127caa90a7 100644 +--- a/drivers/media/platform/qcom/venus/pm_helpers.c ++++ b/drivers/media/platform/qcom/venus/pm_helpers.c +@@ -870,7 +870,7 @@ static int vcodec_domains_get(struct venus_core *core) + pd = dev_pm_domain_attach_by_name(dev, + res->vcodec_pmdomains[i]); + if (IS_ERR_OR_NULL(pd)) +- return PTR_ERR(pd) ? : -ENODATA; ++ return pd ? 
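Editor's sketch (not part of the patch): the dvb_frontend hunks above replace open-coded memdup_user(ptr, num * size) with memdup_array_user(), which rejects multiplication overflow before copying from userspace. A minimal sketch; the property struct is hypothetical.

        #include <linux/string.h>

        struct example_prop {
                u32 cmd;
                u32 data;
        };

        static struct example_prop *example_copy_props(const void __user *uptr,
                                                       unsigned int num)
        {
                /* checks num * sizeof() for overflow, then copies or
                 * returns an ERR_PTR */
                return memdup_array_user(uptr, num, sizeof(struct example_prop));
        }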
PTR_ERR(pd) : -ENODATA; + core->pmdomains[i] = pd; + } + +diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c +index f62703cebb77c..4b4c129c09e70 100644 +--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c ++++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c +@@ -1297,7 +1297,7 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx) + if (ctx->state == MFCINST_FINISHING && ctx->ref_queue_cnt == 0) + src_ready = false; + if (!src_ready || ctx->dst_queue_cnt == 0) +- clear_work_bit(ctx); ++ clear_work_bit_irqsave(ctx); + + return 0; + } +diff --git a/drivers/media/radio/radio-isa.c b/drivers/media/radio/radio-isa.c +index c591c0851fa28..ad49151f5ff09 100644 +--- a/drivers/media/radio/radio-isa.c ++++ b/drivers/media/radio/radio-isa.c +@@ -36,7 +36,7 @@ static int radio_isa_querycap(struct file *file, void *priv, + + strscpy(v->driver, isa->drv->driver.driver.name, sizeof(v->driver)); + strscpy(v->card, isa->drv->card, sizeof(v->card)); +- snprintf(v->bus_info, sizeof(v->bus_info), "ISA:%s", isa->v4l2_dev.name); ++ snprintf(v->bus_info, sizeof(v->bus_info), "ISA:%s", dev_name(isa->v4l2_dev.dev)); + return 0; + } + +diff --git a/drivers/memory/stm32-fmc2-ebi.c b/drivers/memory/stm32-fmc2-ebi.c +index 9015e8277dc8a..871f3de69102e 100644 +--- a/drivers/memory/stm32-fmc2-ebi.c ++++ b/drivers/memory/stm32-fmc2-ebi.c +@@ -181,8 +181,11 @@ static int stm32_fmc2_ebi_check_mux(struct stm32_fmc2_ebi *ebi, + int cs) + { + u32 bcr; ++ int ret; + +- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); ++ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); ++ if (ret) ++ return ret; + + if (bcr & FMC2_BCR_MTYP) + return 0; +@@ -195,8 +198,11 @@ static int stm32_fmc2_ebi_check_waitcfg(struct stm32_fmc2_ebi *ebi, + int cs) + { + u32 bcr, val = FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_NOR); ++ int ret; + +- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); ++ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); ++ if (ret) ++ return ret; + + if ((bcr & FMC2_BCR_MTYP) == val && bcr & FMC2_BCR_BURSTEN) + return 0; +@@ -209,8 +215,11 @@ static int stm32_fmc2_ebi_check_sync_trans(struct stm32_fmc2_ebi *ebi, + int cs) + { + u32 bcr; ++ int ret; + +- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); ++ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); ++ if (ret) ++ return ret; + + if (bcr & FMC2_BCR_BURSTEN) + return 0; +@@ -223,8 +232,11 @@ static int stm32_fmc2_ebi_check_async_trans(struct stm32_fmc2_ebi *ebi, + int cs) + { + u32 bcr; ++ int ret; + +- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); ++ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); ++ if (ret) ++ return ret; + + if (!(bcr & FMC2_BCR_BURSTEN) || !(bcr & FMC2_BCR_CBURSTRW)) + return 0; +@@ -237,8 +249,11 @@ static int stm32_fmc2_ebi_check_cpsize(struct stm32_fmc2_ebi *ebi, + int cs) + { + u32 bcr, val = FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_PSRAM); ++ int ret; + +- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); ++ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); ++ if (ret) ++ return ret; + + if ((bcr & FMC2_BCR_MTYP) == val && bcr & FMC2_BCR_BURSTEN) + return 0; +@@ -251,12 +266,18 @@ static int stm32_fmc2_ebi_check_address_hold(struct stm32_fmc2_ebi *ebi, + int cs) + { + u32 bcr, bxtr, val = FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_D); ++ int ret; ++ ++ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); ++ if (ret) ++ return ret; + +- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); + if (prop->reg_type == FMC2_REG_BWTR) +- regmap_read(ebi->regmap, FMC2_BWTR(cs), 
&bxtr); ++ ret = regmap_read(ebi->regmap, FMC2_BWTR(cs), &bxtr); + else +- regmap_read(ebi->regmap, FMC2_BTR(cs), &bxtr); ++ ret = regmap_read(ebi->regmap, FMC2_BTR(cs), &bxtr); ++ if (ret) ++ return ret; + + if ((!(bcr & FMC2_BCR_BURSTEN) || !(bcr & FMC2_BCR_CBURSTRW)) && + ((bxtr & FMC2_BXTR_ACCMOD) == val || bcr & FMC2_BCR_MUXEN)) +@@ -270,12 +291,19 @@ static int stm32_fmc2_ebi_check_clk_period(struct stm32_fmc2_ebi *ebi, + int cs) + { + u32 bcr, bcr1; ++ int ret; + +- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); +- if (cs) +- regmap_read(ebi->regmap, FMC2_BCR1, &bcr1); +- else ++ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); ++ if (ret) ++ return ret; ++ ++ if (cs) { ++ ret = regmap_read(ebi->regmap, FMC2_BCR1, &bcr1); ++ if (ret) ++ return ret; ++ } else { + bcr1 = bcr; ++ } + + if (bcr & FMC2_BCR_BURSTEN && (!cs || !(bcr1 & FMC2_BCR1_CCLKEN))) + return 0; +@@ -307,12 +335,18 @@ static u32 stm32_fmc2_ebi_ns_to_clk_period(struct stm32_fmc2_ebi *ebi, + { + u32 nb_clk_cycles = stm32_fmc2_ebi_ns_to_clock_cycles(ebi, cs, setup); + u32 bcr, btr, clk_period; ++ int ret; ++ ++ ret = regmap_read(ebi->regmap, FMC2_BCR1, &bcr); ++ if (ret) ++ return ret; + +- regmap_read(ebi->regmap, FMC2_BCR1, &bcr); + if (bcr & FMC2_BCR1_CCLKEN || !cs) +- regmap_read(ebi->regmap, FMC2_BTR1, &btr); ++ ret = regmap_read(ebi->regmap, FMC2_BTR1, &btr); + else +- regmap_read(ebi->regmap, FMC2_BTR(cs), &btr); ++ ret = regmap_read(ebi->regmap, FMC2_BTR(cs), &btr); ++ if (ret) ++ return ret; + + clk_period = FIELD_GET(FMC2_BTR_CLKDIV, btr) + 1; + +@@ -571,11 +605,16 @@ static int stm32_fmc2_ebi_set_address_setup(struct stm32_fmc2_ebi *ebi, + if (ret) + return ret; + +- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); ++ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); ++ if (ret) ++ return ret; ++ + if (prop->reg_type == FMC2_REG_BWTR) +- regmap_read(ebi->regmap, FMC2_BWTR(cs), &bxtr); ++ ret = regmap_read(ebi->regmap, FMC2_BWTR(cs), &bxtr); + else +- regmap_read(ebi->regmap, FMC2_BTR(cs), &bxtr); ++ ret = regmap_read(ebi->regmap, FMC2_BTR(cs), &bxtr); ++ if (ret) ++ return ret; + + if ((bxtr & FMC2_BXTR_ACCMOD) == val || bcr & FMC2_BCR_MUXEN) + val = clamp_val(setup, 1, FMC2_BXTR_ADDSET_MAX); +@@ -693,11 +732,14 @@ static int stm32_fmc2_ebi_set_max_low_pulse(struct stm32_fmc2_ebi *ebi, + int cs, u32 setup) + { + u32 old_val, new_val, pcscntr; ++ int ret; + + if (setup < 1) + return 0; + +- regmap_read(ebi->regmap, FMC2_PCSCNTR, &pcscntr); ++ ret = regmap_read(ebi->regmap, FMC2_PCSCNTR, &pcscntr); ++ if (ret) ++ return ret; + + /* Enable counter for the bank */ + regmap_update_bits(ebi->regmap, FMC2_PCSCNTR, +@@ -944,17 +986,20 @@ static void stm32_fmc2_ebi_disable_bank(struct stm32_fmc2_ebi *ebi, int cs) + regmap_update_bits(ebi->regmap, FMC2_BCR(cs), FMC2_BCR_MBKEN, 0); + } + +-static void stm32_fmc2_ebi_save_setup(struct stm32_fmc2_ebi *ebi) ++static int stm32_fmc2_ebi_save_setup(struct stm32_fmc2_ebi *ebi) + { + unsigned int cs; ++ int ret; + + for (cs = 0; cs < FMC2_MAX_EBI_CE; cs++) { +- regmap_read(ebi->regmap, FMC2_BCR(cs), &ebi->bcr[cs]); +- regmap_read(ebi->regmap, FMC2_BTR(cs), &ebi->btr[cs]); +- regmap_read(ebi->regmap, FMC2_BWTR(cs), &ebi->bwtr[cs]); ++ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &ebi->bcr[cs]); ++ ret |= regmap_read(ebi->regmap, FMC2_BTR(cs), &ebi->btr[cs]); ++ ret |= regmap_read(ebi->regmap, FMC2_BWTR(cs), &ebi->bwtr[cs]); ++ if (ret) ++ return ret; + } + +- regmap_read(ebi->regmap, FMC2_PCSCNTR, &ebi->pcscntr); ++ return regmap_read(ebi->regmap, FMC2_PCSCNTR, &ebi->pcscntr); + 
} + + static void stm32_fmc2_ebi_set_setup(struct stm32_fmc2_ebi *ebi) +@@ -983,22 +1028,29 @@ static void stm32_fmc2_ebi_disable_banks(struct stm32_fmc2_ebi *ebi) + } + + /* NWAIT signal can not be connected to EBI controller and NAND controller */ +-static bool stm32_fmc2_ebi_nwait_used_by_ctrls(struct stm32_fmc2_ebi *ebi) ++static int stm32_fmc2_ebi_nwait_used_by_ctrls(struct stm32_fmc2_ebi *ebi) + { ++ struct device *dev = ebi->dev; + unsigned int cs; + u32 bcr; ++ int ret; + + for (cs = 0; cs < FMC2_MAX_EBI_CE; cs++) { + if (!(ebi->bank_assigned & BIT(cs))) + continue; + +- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); ++ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); ++ if (ret) ++ return ret; ++ + if ((bcr & FMC2_BCR_WAITEN || bcr & FMC2_BCR_ASYNCWAIT) && +- ebi->bank_assigned & BIT(FMC2_NAND)) +- return true; ++ ebi->bank_assigned & BIT(FMC2_NAND)) { ++ dev_err(dev, "NWAIT signal connected to EBI and NAND controllers\n"); ++ return -EINVAL; ++ } + } + +- return false; ++ return 0; + } + + static void stm32_fmc2_ebi_enable(struct stm32_fmc2_ebi *ebi) +@@ -1085,10 +1137,9 @@ static int stm32_fmc2_ebi_parse_dt(struct stm32_fmc2_ebi *ebi) + return -ENODEV; + } + +- if (stm32_fmc2_ebi_nwait_used_by_ctrls(ebi)) { +- dev_err(dev, "NWAIT signal connected to EBI and NAND controllers\n"); +- return -EINVAL; +- } ++ ret = stm32_fmc2_ebi_nwait_used_by_ctrls(ebi); ++ if (ret) ++ return ret; + + stm32_fmc2_ebi_enable(ebi); + +@@ -1133,7 +1184,10 @@ static int stm32_fmc2_ebi_probe(struct platform_device *pdev) + if (ret) + goto err_release; + +- stm32_fmc2_ebi_save_setup(ebi); ++ ret = stm32_fmc2_ebi_save_setup(ebi); ++ if (ret) ++ goto err_release; ++ + platform_set_drvdata(pdev, ebi); + + return 0; +diff --git a/drivers/memory/tegra/tegra186.c b/drivers/memory/tegra/tegra186.c +index 533f85a4b2bdb..7633481e547d2 100644 +--- a/drivers/memory/tegra/tegra186.c ++++ b/drivers/memory/tegra/tegra186.c +@@ -75,6 +75,9 @@ static void tegra186_mc_client_sid_override(struct tegra_mc *mc, + { + u32 value, old; + ++ if (client->regs.sid.security == 0 && client->regs.sid.override == 0) ++ return; ++ + value = readl(mc->regs + client->regs.sid.security); + if ((value & MC_SID_STREAMID_SECURITY_OVERRIDE) == 0) { + /* +diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c +index bbcbc921f3ab8..f0626814d56b9 100644 +--- a/drivers/misc/fastrpc.c ++++ b/drivers/misc/fastrpc.c +@@ -2087,16 +2087,6 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp) + return err; + } + +-static int is_attach_rejected(struct fastrpc_user *fl) +-{ +- /* Check if the device node is non-secure */ +- if (!fl->is_secure_dev) { +- dev_dbg(&fl->cctx->rpdev->dev, "untrusted app trying to attach to privileged DSP PD\n"); +- return -EACCES; +- } +- return 0; +-} +- + static long fastrpc_device_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) + { +@@ -2109,19 +2099,13 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int cmd, + err = fastrpc_invoke(fl, argp); + break; + case FASTRPC_IOCTL_INIT_ATTACH: +- err = is_attach_rejected(fl); +- if (!err) +- err = fastrpc_init_attach(fl, ROOT_PD); ++ err = fastrpc_init_attach(fl, ROOT_PD); + break; + case FASTRPC_IOCTL_INIT_ATTACH_SNS: +- err = is_attach_rejected(fl); +- if (!err) +- err = fastrpc_init_attach(fl, SENSORS_PD); ++ err = fastrpc_init_attach(fl, SENSORS_PD); + break; + case FASTRPC_IOCTL_INIT_CREATE_STATIC: +- err = is_attach_rejected(fl); +- if (!err) +- err = fastrpc_init_create_static_process(fl, argp); ++ err = 
fastrpc_init_create_static_process(fl, argp); + break; + case FASTRPC_IOCTL_INIT_CREATE: + err = fastrpc_init_create_process(fl, argp); +diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c +index 0f6a563103fd2..d780880ddd14b 100644 +--- a/drivers/mmc/core/mmc_test.c ++++ b/drivers/mmc/core/mmc_test.c +@@ -3104,13 +3104,13 @@ static ssize_t mtf_test_write(struct file *file, const char __user *buf, + test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL); + #ifdef CONFIG_HIGHMEM + test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER); ++ if (!test->highmem) { ++ count = -ENOMEM; ++ goto free_test_buffer; ++ } + #endif + +-#ifdef CONFIG_HIGHMEM +- if (test->buffer && test->highmem) { +-#else + if (test->buffer) { +-#endif + mutex_lock(&mmc_test_lock); + mmc_test_run(test, testcase); + mutex_unlock(&mmc_test_lock); +@@ -3118,6 +3118,7 @@ static ssize_t mtf_test_write(struct file *file, const char __user *buf, + + #ifdef CONFIG_HIGHMEM + __free_pages(test->highmem, BUFFER_ORDER); ++free_test_buffer: + #endif + kfree(test->buffer); + kfree(test); +diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c +index 829af2c98a448..02bee7afab37e 100644 +--- a/drivers/mmc/host/dw_mmc.c ++++ b/drivers/mmc/host/dw_mmc.c +@@ -3294,6 +3294,10 @@ int dw_mci_probe(struct dw_mci *host) + host->biu_clk = devm_clk_get(host->dev, "biu"); + if (IS_ERR(host->biu_clk)) { + dev_dbg(host->dev, "biu clock not available\n"); ++ ret = PTR_ERR(host->biu_clk); ++ if (ret == -EPROBE_DEFER) ++ return ret; ++ + } else { + ret = clk_prepare_enable(host->biu_clk); + if (ret) { +@@ -3305,6 +3309,10 @@ int dw_mci_probe(struct dw_mci *host) + host->ciu_clk = devm_clk_get(host->dev, "ciu"); + if (IS_ERR(host->ciu_clk)) { + dev_dbg(host->dev, "ciu clock not available\n"); ++ ret = PTR_ERR(host->ciu_clk); ++ if (ret == -EPROBE_DEFER) ++ goto err_clk_biu; ++ + host->bus_hz = host->pdata->bus_hz; + } else { + ret = clk_prepare_enable(host->ciu_clk); +diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c +index 97f7c3d4be6ea..8b755f1627325 100644 +--- a/drivers/mmc/host/mtk-sd.c ++++ b/drivers/mmc/host/mtk-sd.c +@@ -1222,7 +1222,7 @@ static bool msdc_cmd_done(struct msdc_host *host, int events, + } + + if (!sbc_error && !(events & MSDC_INT_CMDRDY)) { +- if (events & MSDC_INT_CMDTMO || ++ if ((events & MSDC_INT_CMDTMO && !host->hs400_tuning) || + (!mmc_op_tuning(cmd->opcode) && !host->hs400_tuning)) + /* + * should not clear fifo/interrupt as the tune data +@@ -1315,9 +1315,9 @@ static void msdc_start_command(struct msdc_host *host, + static void msdc_cmd_next(struct msdc_host *host, + struct mmc_request *mrq, struct mmc_command *cmd) + { +- if ((cmd->error && +- !(cmd->error == -EILSEQ && +- (mmc_op_tuning(cmd->opcode) || host->hs400_tuning))) || ++ if ((cmd->error && !host->hs400_tuning && ++ !(cmd->error == -EILSEQ && ++ mmc_op_tuning(cmd->opcode))) || + (mrq->sbc && mrq->sbc->error)) + msdc_request_done(host, mrq); + else if (cmd == mrq->sbc) +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c +index 722ac5c4992c9..566b02ca78261 100644 +--- a/drivers/net/bonding/bond_main.c ++++ b/drivers/net/bonding/bond_main.c +@@ -582,7 +582,6 @@ static void bond_ipsec_del_sa_all(struct bonding *bond) + } else { + slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs); + } +- ipsec->xs->xso.real_dev = NULL; + } + spin_unlock_bh(&bond->ipsec_lock); + rcu_read_unlock(); +@@ -599,34 +598,30 @@ static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) 
+ struct net_device *real_dev; + struct slave *curr_active; + struct bonding *bond; +- int err; ++ bool ok = false; + + bond = netdev_priv(bond_dev); + rcu_read_lock(); + curr_active = rcu_dereference(bond->curr_active_slave); ++ if (!curr_active) ++ goto out; + real_dev = curr_active->dev; + +- if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { +- err = false; ++ if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) + goto out; +- } + +- if (!xs->xso.real_dev) { +- err = false; ++ if (!xs->xso.real_dev) + goto out; +- } + + if (!real_dev->xfrmdev_ops || + !real_dev->xfrmdev_ops->xdo_dev_offload_ok || +- netif_is_bond_master(real_dev)) { +- err = false; ++ netif_is_bond_master(real_dev)) + goto out; +- } + +- err = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs); ++ ok = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs); + out: + rcu_read_unlock(); +- return err; ++ return ok; + } + + static const struct xfrmdev_ops bond_xfrmdev_ops = { +diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c +index 00a662f8edd6b..d1208d058eea1 100644 +--- a/drivers/net/bonding/bond_options.c ++++ b/drivers/net/bonding/bond_options.c +@@ -920,7 +920,7 @@ static int bond_option_active_slave_set(struct bonding *bond, + /* check to see if we are clearing active */ + if (!slave_dev) { + netdev_dbg(bond->dev, "Clearing current active slave\n"); +- RCU_INIT_POINTER(bond->curr_active_slave, NULL); ++ bond_change_active_slave(bond, NULL); + bond_select_active_slave(bond); + } else { + struct slave *old_active = rtnl_dereference(bond->curr_active_slave); +diff --git a/drivers/net/dsa/microchip/ksz_ptp.c b/drivers/net/dsa/microchip/ksz_ptp.c +index 4e22a695a64c3..7ef5fac69657f 100644 +--- a/drivers/net/dsa/microchip/ksz_ptp.c ++++ b/drivers/net/dsa/microchip/ksz_ptp.c +@@ -266,7 +266,6 @@ static int ksz_ptp_enable_mode(struct ksz_device *dev) + struct ksz_port *prt; + struct dsa_port *dp; + bool tag_en = false; +- int ret; + + dsa_switch_for_each_user_port(dp, dev->ds) { + prt = &dev->ports[dp->index]; +@@ -277,9 +276,7 @@ static int ksz_ptp_enable_mode(struct ksz_device *dev) + } + + if (tag_en) { +- ret = ptp_schedule_worker(ptp_data->clock, 0); +- if (ret) +- return ret; ++ ptp_schedule_worker(ptp_data->clock, 0); + } else { + ptp_cancel_worker_sync(ptp_data->clock); + } +diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c +index ce3b3690c3c05..c47f068f56b32 100644 +--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c ++++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c +@@ -457,7 +457,8 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) + trace_mv88e6xxx_atu_full_violation(chip->dev, spid, + entry.portvec, entry.mac, + fid); +- chip->ports[spid].atu_full_violation++; ++ if (spid < ARRAY_SIZE(chip->ports)) ++ chip->ports[spid].atu_full_violation++; + } + + return IRQ_HANDLED; +diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c +index 9a3e5ec169726..b0b4b4af9a1df 100644 +--- a/drivers/net/dsa/ocelot/felix.c ++++ b/drivers/net/dsa/ocelot/felix.c +@@ -528,7 +528,9 @@ static int felix_tag_8021q_setup(struct dsa_switch *ds) + * so we need to be careful that there are no extra frames to be + * dequeued over MMIO, since we would never know to discard them. 
+ */ ++ ocelot_lock_xtr_grp_bh(ocelot, 0); + ocelot_drain_cpu_queue(ocelot, 0); ++ ocelot_unlock_xtr_grp_bh(ocelot, 0); + + return 0; + } +@@ -1504,6 +1506,8 @@ static void felix_port_deferred_xmit(struct kthread_work *work) + int port = xmit_work->dp->index; + int retries = 10; + ++ ocelot_lock_inj_grp(ocelot, 0); ++ + do { + if (ocelot_can_inject(ocelot, 0)) + break; +@@ -1512,6 +1516,7 @@ static void felix_port_deferred_xmit(struct kthread_work *work) + } while (--retries); + + if (!retries) { ++ ocelot_unlock_inj_grp(ocelot, 0); + dev_err(ocelot->dev, "port %d failed to inject skb\n", + port); + ocelot_port_purge_txtstamp_skb(ocelot, port, skb); +@@ -1521,6 +1526,8 @@ static void felix_port_deferred_xmit(struct kthread_work *work) + + ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb); + ++ ocelot_unlock_inj_grp(ocelot, 0); ++ + consume_skb(skb); + kfree(xmit_work); + } +@@ -1671,6 +1678,8 @@ static bool felix_check_xtr_pkt(struct ocelot *ocelot) + if (!felix->info->quirk_no_xtr_irq) + return false; + ++ ocelot_lock_xtr_grp(ocelot, grp); ++ + while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) { + struct sk_buff *skb; + unsigned int type; +@@ -1707,6 +1716,8 @@ static bool felix_check_xtr_pkt(struct ocelot *ocelot) + ocelot_drain_cpu_queue(ocelot, 0); + } + ++ ocelot_unlock_xtr_grp(ocelot, grp); ++ + return true; + } + +diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c +index c99fb1bd4c25a..23bd8b3f89931 100644 +--- a/drivers/net/dsa/vitesse-vsc73xx-core.c ++++ b/drivers/net/dsa/vitesse-vsc73xx-core.c +@@ -17,6 +17,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -37,6 +38,10 @@ + #define VSC73XX_BLOCK_ARBITER 0x5 /* Only subblock 0 */ + #define VSC73XX_BLOCK_SYSTEM 0x7 /* Only subblock 0 */ + ++/* MII Block subblock */ ++#define VSC73XX_BLOCK_MII_INTERNAL 0x0 /* Internal MDIO subblock */ ++#define VSC73XX_BLOCK_MII_EXTERNAL 0x1 /* External MDIO subblock */ ++ + #define CPU_PORT 6 /* CPU port */ + + /* MAC Block registers */ +@@ -195,6 +200,8 @@ + #define VSC73XX_MII_CMD 0x1 + #define VSC73XX_MII_DATA 0x2 + ++#define VSC73XX_MII_STAT_BUSY BIT(3) ++ + /* Arbiter block 5 registers */ + #define VSC73XX_ARBEMPTY 0x0c + #define VSC73XX_ARBDISC 0x0e +@@ -268,6 +275,10 @@ + #define IS_7398(a) ((a)->chipid == VSC73XX_CHIPID_ID_7398) + #define IS_739X(a) (IS_7395(a) || IS_7398(a)) + ++#define VSC73XX_POLL_SLEEP_US 1000 ++#define VSC73XX_MDIO_POLL_SLEEP_US 5 ++#define VSC73XX_POLL_TIMEOUT_US 10000 ++ + struct vsc73xx_counter { + u8 counter; + const char *name; +@@ -483,6 +494,22 @@ static int vsc73xx_detect(struct vsc73xx *vsc) + return 0; + } + ++static int vsc73xx_mdio_busy_check(struct vsc73xx *vsc) ++{ ++ int ret, err; ++ u32 val; ++ ++ ret = read_poll_timeout(vsc73xx_read, err, ++ err < 0 || !(val & VSC73XX_MII_STAT_BUSY), ++ VSC73XX_MDIO_POLL_SLEEP_US, ++ VSC73XX_POLL_TIMEOUT_US, false, vsc, ++ VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL, ++ VSC73XX_MII_STAT, &val); ++ if (ret) ++ return ret; ++ return err; ++} ++ + static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum) + { + struct vsc73xx *vsc = ds->priv; +@@ -490,12 +517,20 @@ static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum) + u32 val; + int ret; + ++ ret = vsc73xx_mdio_busy_check(vsc); ++ if (ret) ++ return ret; ++ + /* Setting bit 26 means "read" */ + cmd = BIT(26) | (phy << 21) | (regnum << 16); + ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd); + if (ret) + return ret; +- msleep(2); ++ ++ ret = 
vsc73xx_mdio_busy_check(vsc); ++ if (ret) ++ return ret; ++ + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MII, 0, 2, &val); + if (ret) + return ret; +@@ -519,6 +554,10 @@ static int vsc73xx_phy_write(struct dsa_switch *ds, int phy, int regnum, + u32 cmd; + int ret; + ++ ret = vsc73xx_mdio_busy_check(vsc); ++ if (ret) ++ return ret; ++ + /* It was found through tedious experiments that this router + * chip really hates to have it's PHYs reset. They + * never recover if that happens: autonegotiation stops +@@ -530,7 +569,7 @@ static int vsc73xx_phy_write(struct dsa_switch *ds, int phy, int regnum, + return 0; + } + +- cmd = (phy << 21) | (regnum << 16); ++ cmd = (phy << 21) | (regnum << 16) | val; + ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd); + if (ret) + return ret; +@@ -779,7 +818,7 @@ static void vsc73xx_adjust_link(struct dsa_switch *ds, int port, + * after a PHY or the CPU port comes up or down. + */ + if (!phydev->link) { +- int maxloop = 10; ++ int ret, err; + + dev_dbg(vsc->dev, "port %d: went down\n", + port); +@@ -794,19 +833,17 @@ static void vsc73xx_adjust_link(struct dsa_switch *ds, int port, + VSC73XX_ARBDISC, BIT(port), BIT(port)); + + /* Wait until queue is empty */ +- vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0, +- VSC73XX_ARBEMPTY, &val); +- while (!(val & BIT(port))) { +- msleep(1); +- vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0, +- VSC73XX_ARBEMPTY, &val); +- if (--maxloop == 0) { +- dev_err(vsc->dev, +- "timeout waiting for block arbiter\n"); +- /* Continue anyway */ +- break; +- } +- } ++ ret = read_poll_timeout(vsc73xx_read, err, ++ err < 0 || (val & BIT(port)), ++ VSC73XX_POLL_SLEEP_US, ++ VSC73XX_POLL_TIMEOUT_US, false, ++ vsc, VSC73XX_BLOCK_ARBITER, 0, ++ VSC73XX_ARBEMPTY, &val); ++ if (ret) ++ dev_err(vsc->dev, ++ "timeout waiting for block arbiter\n"); ++ else if (err < 0) ++ dev_err(vsc->dev, "error reading arbiter\n"); + + /* Put this port into reset */ + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +index 8cb9a99154aad..2845796f782c2 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +@@ -297,11 +297,6 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, + * redirect is coming from a frame received by the + * bnxt_en driver. + */ +- rx_buf = &rxr->rx_buf_ring[cons]; +- mapping = rx_buf->mapping - bp->rx_dma_offset; +- dma_unmap_page_attrs(&pdev->dev, mapping, +- BNXT_RX_PAGE_SIZE, bp->rx_dir, +- DMA_ATTR_WEAK_ORDERING); + + /* if we are unable to allocate a new buffer, abort and reuse */ + if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) { +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +index 786ceae344887..dd9e68465e697 100644 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +@@ -1244,7 +1244,8 @@ static u64 hash_filter_ntuple(struct ch_filter_specification *fs, + * in the Compressed Filter Tuple. 
+ */ + if (tp->vlan_shift >= 0 && fs->mask.ivlan) +- ntuple |= (FT_VLAN_VLD_F | fs->val.ivlan) << tp->vlan_shift; ++ ntuple |= (u64)(FT_VLAN_VLD_F | ++ fs->val.ivlan) << tp->vlan_shift; + + if (tp->port_shift >= 0 && fs->mask.iport) + ntuple |= (u64)fs->val.iport << tp->port_shift; +diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +index e01a246124ac6..a05a8525caa45 100644 +--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c ++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +@@ -2603,13 +2603,14 @@ static int dpaa2_switch_refill_bp(struct ethsw_core *ethsw) + + static int dpaa2_switch_seed_bp(struct ethsw_core *ethsw) + { +- int *count, i; ++ int *count, ret, i; + + for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) { ++ ret = dpaa2_switch_add_bufs(ethsw, ethsw->bpid); + count = ðsw->buf_count; +- *count += dpaa2_switch_add_bufs(ethsw, ethsw->bpid); ++ *count += ret; + +- if (unlikely(*count < BUFS_PER_CMD)) ++ if (unlikely(ret < BUFS_PER_CMD)) + return -ENOMEM; + } + +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +index db9574e9fb7bc..14d086b535a2d 100644 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +@@ -5729,6 +5729,9 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) + struct net_device *netdev = handle->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(netdev); + ++ if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) ++ hns3_nic_net_stop(netdev); ++ + if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { + netdev_warn(netdev, "already uninitialized\n"); + return 0; +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +index c8059d96f64be..92c592c177e67 100644 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +@@ -2598,8 +2598,17 @@ static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, + { + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; ++ int ret; ++ ++ ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num); ++ ++ if (ret) ++ return ret; + +- return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num); ++ hdev->hw.mac.req_speed = speed; ++ hdev->hw.mac.req_duplex = duplex; ++ ++ return 0; + } + + static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) +@@ -2901,17 +2910,20 @@ static int hclge_mac_init(struct hclge_dev *hdev) + if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + hdev->hw.mac.duplex = HCLGE_MAC_FULL; + +- ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, +- hdev->hw.mac.duplex, hdev->hw.mac.lane_num); +- if (ret) +- return ret; +- + if (hdev->hw.mac.support_autoneg) { + ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg); + if (ret) + return ret; + } + ++ if (!hdev->hw.mac.autoneg) { ++ ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.req_speed, ++ hdev->hw.mac.req_duplex, ++ hdev->hw.mac.lane_num); ++ if (ret) ++ return ret; ++ } ++ + mac->link = 0; + + if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) { +@@ -11430,8 +11442,8 @@ static void hclge_reset_done(struct hnae3_ae_dev *ae_dev) + dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret); + + hdev->reset_type = HNAE3_NONE_RESET; +- clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); +- 
up(&hdev->reset_sem); ++ if (test_and_clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) ++ up(&hdev->reset_sem); + } + + static void hclge_clear_resetting_state(struct hclge_dev *hdev) +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +index 877feee53804f..61e155c4d441e 100644 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +@@ -1124,10 +1124,11 @@ void hclge_mbx_handler(struct hclge_dev *hdev) + req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data; + + flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); +- if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) { ++ if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B) || ++ req->mbx_src_vfid > hdev->num_req_vfs)) { + dev_warn(&hdev->pdev->dev, +- "dropped invalid mailbox message, code = %u\n", +- req->msg.code); ++ "dropped invalid mailbox message, code = %u, vfid = %u\n", ++ req->msg.code, req->mbx_src_vfid); + + /* dropping/not processing this invalid message */ + crq->desc[crq->next_to_use].flag = 0; +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +index 85fb11de43a12..80079657afebe 100644 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +@@ -191,6 +191,9 @@ static void hclge_mac_adjust_link(struct net_device *netdev) + if (ret) + netdev_err(netdev, "failed to adjust link.\n"); + ++ hdev->hw.mac.req_speed = (u32)speed; ++ hdev->hw.mac.req_duplex = (u8)duplex; ++ + ret = hclge_cfg_flowctrl(hdev); + if (ret) + netdev_err(netdev, "failed to configure flow control.\n"); +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +index 43ee20eb03d1f..affdd9d70549a 100644 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +@@ -1710,8 +1710,8 @@ static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev) + ret); + + hdev->reset_type = HNAE3_NONE_RESET; +- clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); +- up(&hdev->reset_sem); ++ if (test_and_clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) ++ up(&hdev->reset_sem); + } + + static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) +diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c +index 5e27470c6b1ef..f2d4669c81cf2 100644 +--- a/drivers/net/ethernet/i825xx/sun3_82586.c ++++ b/drivers/net/ethernet/i825xx/sun3_82586.c +@@ -987,7 +987,7 @@ static void sun3_82586_timeout(struct net_device *dev, unsigned int txqueue) + { + #ifdef DEBUG + printk("%s: xmitter timed out, try to restart! 
stat: %02x\n",dev->name,p->scb->cus); +- printk("%s: command-stats: %04x %04x\n",dev->name,swab16(p->xmit_cmds[0]->cmd_status),swab16(p->xmit_cmds[1]->cmd_status)); ++ printk("%s: command-stats: %04x\n", dev->name, swab16(p->xmit_cmds[0]->cmd_status)); + printk("%s: check, whether you set the right interrupt number!\n",dev->name); + #endif + sun3_82586_close(dev); +diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c +index 4f3e65b47cdc3..9a0682b05c4ff 100644 +--- a/drivers/net/ethernet/intel/ice/ice_base.c ++++ b/drivers/net/ethernet/intel/ice/ice_base.c +@@ -519,6 +519,25 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) + return 0; + } + ++/** ++ * ice_get_frame_sz - calculate xdp_buff::frame_sz ++ * @rx_ring: the ring being configured ++ * ++ * Return frame size based on underlying PAGE_SIZE ++ */ ++static unsigned int ice_get_frame_sz(struct ice_rx_ring *rx_ring) ++{ ++ unsigned int frame_sz; ++ ++#if (PAGE_SIZE >= 8192) ++ frame_sz = rx_ring->rx_buf_len; ++#else ++ frame_sz = ice_rx_pg_size(rx_ring) / 2; ++#endif ++ ++ return frame_sz; ++} ++ + /** + * ice_vsi_cfg_rxq - Configure an Rx queue + * @ring: the ring being configured +@@ -582,7 +601,7 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) + } + } + +- xdp_init_buff(&ring->xdp, ice_rx_pg_size(ring) / 2, &ring->xdp_rxq); ++ xdp_init_buff(&ring->xdp, ice_get_frame_sz(ring), &ring->xdp_rxq); + ring->xdp.data = NULL; + err = ice_setup_rx_ctx(ring); + if (err) { +diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c +index 49b1fa9651161..429afffa4c316 100644 +--- a/drivers/net/ethernet/intel/ice/ice_txrx.c ++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c +@@ -521,30 +521,6 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring) + return -ENOMEM; + } + +-/** +- * ice_rx_frame_truesize +- * @rx_ring: ptr to Rx ring +- * @size: size +- * +- * calculate the truesize with taking into the account PAGE_SIZE of +- * underlying arch +- */ +-static unsigned int +-ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size) +-{ +- unsigned int truesize; +- +-#if (PAGE_SIZE < 8192) +- truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ +-#else +- truesize = rx_ring->rx_offset ? 
+- SKB_DATA_ALIGN(rx_ring->rx_offset + size) + +- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : +- SKB_DATA_ALIGN(size); +-#endif +- return truesize; +-} +- + /** + * ice_run_xdp - Executes an XDP program on initialized xdp_buff + * @rx_ring: Rx ring +@@ -834,16 +810,15 @@ ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf) + if (!dev_page_is_reusable(page)) + return false; + +-#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1)) + return false; +-#else ++#if (PAGE_SIZE >= 8192) + #define ICE_LAST_OFFSET \ +- (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048) ++ (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_3072) + if (rx_buf->page_offset > ICE_LAST_OFFSET) + return false; +-#endif /* PAGE_SIZE < 8192) */ ++#endif /* PAGE_SIZE >= 8192) */ + + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the +@@ -946,12 +921,7 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size, + struct ice_rx_buf *rx_buf; + + rx_buf = &rx_ring->rx_buf[ntc]; +- rx_buf->pgcnt = +-#if (PAGE_SIZE < 8192) +- page_count(rx_buf->page); +-#else +- 0; +-#endif ++ rx_buf->pgcnt = page_count(rx_buf->page); + prefetchw(rx_buf->page); + + if (!size) +@@ -1158,11 +1128,6 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) + bool failure; + u32 first; + +- /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ +-#if (PAGE_SIZE < 8192) +- xdp->frame_sz = ice_rx_frame_truesize(rx_ring, 0); +-#endif +- + xdp_prog = READ_ONCE(rx_ring->xdp_prog); + if (xdp_prog) { + xdp_ring = rx_ring->xdp_ring; +@@ -1222,10 +1187,6 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) + hard_start = page_address(rx_buf->page) + rx_buf->page_offset - + offset; + xdp_prepare_buff(xdp, hard_start, offset, size, !!offset); +-#if (PAGE_SIZE > 4096) +- /* At larger PAGE_SIZE, frame_sz depend on len size */ +- xdp->frame_sz = ice_rx_frame_truesize(rx_ring, size); +-#endif + xdp_buff_clear_frags_flag(xdp); + } else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) { + break; +diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c +index 4431e7693d45f..8c8894ef33886 100644 +--- a/drivers/net/ethernet/intel/igb/igb_main.c ++++ b/drivers/net/ethernet/intel/igb/igb_main.c +@@ -4833,6 +4833,7 @@ static void igb_set_rx_buffer_len(struct igb_adapter *adapter, + + #if (PAGE_SIZE < 8192) + if (adapter->max_frame_size > IGB_MAX_FRAME_BUILD_SKB || ++ IGB_2K_TOO_SMALL_WITH_PADDING || + rd32(E1000_RCTL) & E1000_RCTL_SBP) + set_ring_uses_large_buffer(rx_ring); + #endif +diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h +index b3037016f31d2..a18af5c87cde4 100644 +--- a/drivers/net/ethernet/intel/igc/igc_defines.h ++++ b/drivers/net/ethernet/intel/igc/igc_defines.h +@@ -402,6 +402,12 @@ + #define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */ + #define IGC_DTXMXPKTSZ_DEFAULT 0x98 /* 9728-byte Jumbo frames */ + ++/* Retry Buffer Control */ ++#define IGC_RETX_CTL 0x041C ++#define IGC_RETX_CTL_WATERMARK_MASK 0xF ++#define IGC_RETX_CTL_QBVFULLTH_SHIFT 8 /* QBV Retry Buffer Full Threshold */ ++#define IGC_RETX_CTL_QBVFULLEN 0x1000 /* Enable QBV Retry Buffer Full Threshold */ ++ + /* Transmit Scheduling Latency */ + /* Latency between transmission scheduling (LaunchTime) and the time + * the packet is transmitted to the network in nanosecond. 
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c +index d80bbcdeb93ed..21fb1a98ebca6 100644 +--- a/drivers/net/ethernet/intel/igc/igc_main.c ++++ b/drivers/net/ethernet/intel/igc/igc_main.c +@@ -6217,12 +6217,16 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter, + if (!validate_schedule(adapter, qopt)) + return -EINVAL; + ++ igc_ptp_read(adapter, &now); ++ ++ if (igc_tsn_is_taprio_activated_by_user(adapter) && ++ is_base_time_past(qopt->base_time, &now)) ++ adapter->qbv_config_change_errors++; ++ + adapter->cycle_time = qopt->cycle_time; + adapter->base_time = qopt->base_time; + adapter->taprio_offload_enable = true; + +- igc_ptp_read(adapter, &now); +- + for (n = 0; n < qopt->num_entries; n++) { + struct tc_taprio_sched_entry *e = &qopt->entries[n]; + +diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c +index 22cefb1eeedfa..d68fa7f3d5f07 100644 +--- a/drivers/net/ethernet/intel/igc/igc_tsn.c ++++ b/drivers/net/ethernet/intel/igc/igc_tsn.c +@@ -49,12 +49,19 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter) + return new_flags; + } + ++static bool igc_tsn_is_tx_mode_in_tsn(struct igc_adapter *adapter) ++{ ++ struct igc_hw *hw = &adapter->hw; ++ ++ return !!(rd32(IGC_TQAVCTRL) & IGC_TQAVCTRL_TRANSMIT_MODE_TSN); ++} ++ + void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter) + { + struct igc_hw *hw = &adapter->hw; + u16 txoffset; + +- if (!is_any_launchtime(adapter)) ++ if (!igc_tsn_is_tx_mode_in_tsn(adapter)) + return; + + switch (adapter->link_speed) { +@@ -78,6 +85,23 @@ void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter) + wr32(IGC_GTXOFFSET, txoffset); + } + ++static void igc_tsn_restore_retx_default(struct igc_adapter *adapter) ++{ ++ struct igc_hw *hw = &adapter->hw; ++ u32 retxctl; ++ ++ retxctl = rd32(IGC_RETX_CTL) & IGC_RETX_CTL_WATERMARK_MASK; ++ wr32(IGC_RETX_CTL, retxctl); ++} ++ ++bool igc_tsn_is_taprio_activated_by_user(struct igc_adapter *adapter) ++{ ++ struct igc_hw *hw = &adapter->hw; ++ ++ return (rd32(IGC_BASET_H) || rd32(IGC_BASET_L)) && ++ adapter->taprio_offload_enable; ++} ++ + /* Returns the TSN specific registers to their default values after + * the adapter is reset. 
+ */ +@@ -91,6 +115,9 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter) + wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); + wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT); + ++ if (igc_is_device_id_i226(hw)) ++ igc_tsn_restore_retx_default(adapter); ++ + tqavctrl = rd32(IGC_TQAVCTRL); + tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN | + IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS); +@@ -111,6 +138,25 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter) + return 0; + } + ++/* To partially fix i226 HW errata, reduce MAC internal buffering from 192 Bytes ++ * to 88 Bytes by setting RETX_CTL register using the recommendation from: ++ * a) Ethernet Controller I225/I226 Specification Update Rev 2.1 ++ * Item 9: TSN: Packet Transmission Might Cross the Qbv Window ++ * b) I225/6 SW User Manual Rev 1.2.4: Section 8.11.5 Retry Buffer Control ++ */ ++static void igc_tsn_set_retx_qbvfullthreshold(struct igc_adapter *adapter) ++{ ++ struct igc_hw *hw = &adapter->hw; ++ u32 retxctl, watermark; ++ ++ retxctl = rd32(IGC_RETX_CTL); ++ watermark = retxctl & IGC_RETX_CTL_WATERMARK_MASK; ++ /* Set QBVFULLTH value using watermark and set QBVFULLEN */ ++ retxctl |= (watermark << IGC_RETX_CTL_QBVFULLTH_SHIFT) | ++ IGC_RETX_CTL_QBVFULLEN; ++ wr32(IGC_RETX_CTL, retxctl); ++} ++ + static int igc_tsn_enable_offload(struct igc_adapter *adapter) + { + struct igc_hw *hw = &adapter->hw; +@@ -123,6 +169,9 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter) + wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN); + wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN); + ++ if (igc_is_device_id_i226(hw)) ++ igc_tsn_set_retx_qbvfullthreshold(adapter); ++ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + u32 txqctl = 0; +@@ -262,14 +311,6 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter) + s64 n = div64_s64(ktime_sub_ns(systim, base_time), cycle); + + base_time = ktime_add_ns(base_time, (n + 1) * cycle); +- +- /* Increase the counter if scheduling into the past while +- * Gate Control List (GCL) is running. +- */ +- if ((rd32(IGC_BASET_H) || rd32(IGC_BASET_L)) && +- (adapter->tc_setup_type == TC_SETUP_QDISC_TAPRIO) && +- (adapter->qbv_count > 1)) +- adapter->qbv_config_change_errors++; + } else { + if (igc_is_device_id_i226(hw)) { + ktime_t adjust_time, expires_time; +@@ -331,15 +372,22 @@ int igc_tsn_reset(struct igc_adapter *adapter) + return err; + } + +-int igc_tsn_offload_apply(struct igc_adapter *adapter) ++static bool igc_tsn_will_tx_mode_change(struct igc_adapter *adapter) + { +- struct igc_hw *hw = &adapter->hw; ++ bool any_tsn_enabled = !!(igc_tsn_new_flags(adapter) & ++ IGC_FLAG_TSN_ANY_ENABLED); + +- /* Per I225/6 HW Design Section 7.5.2.1, transmit mode +- * cannot be changed dynamically. Require reset the adapter. ++ return (any_tsn_enabled && !igc_tsn_is_tx_mode_in_tsn(adapter)) || ++ (!any_tsn_enabled && igc_tsn_is_tx_mode_in_tsn(adapter)); ++} ++ ++int igc_tsn_offload_apply(struct igc_adapter *adapter) ++{ ++ /* Per I225/6 HW Design Section 7.5.2.1 guideline, if tx mode change ++ * from legacy->tsn or tsn->legacy, then reset adapter is needed. 
+ */ + if (netif_running(adapter->netdev) && +- (igc_is_device_id_i225(hw) || !adapter->qbv_count)) { ++ igc_tsn_will_tx_mode_change(adapter)) { + schedule_work(&adapter->reset_task); + return 0; + } +diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.h b/drivers/net/ethernet/intel/igc/igc_tsn.h +index b53e6af560b73..98ec845a86bf0 100644 +--- a/drivers/net/ethernet/intel/igc/igc_tsn.h ++++ b/drivers/net/ethernet/intel/igc/igc_tsn.h +@@ -7,5 +7,6 @@ + int igc_tsn_offload_apply(struct igc_adapter *adapter); + int igc_tsn_reset(struct igc_adapter *adapter); + void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter); ++bool igc_tsn_is_taprio_activated_by_user(struct igc_adapter *adapter); + + #endif /* _IGC_BASE_H */ +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c +index 3e09d22858147..daf4b951e9059 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c +@@ -632,7 +632,9 @@ int rvu_mbox_handler_cpt_inline_ipsec_cfg(struct rvu *rvu, + return ret; + } + +-static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req) ++static bool validate_and_update_reg_offset(struct rvu *rvu, ++ struct cpt_rd_wr_reg_msg *req, ++ u64 *reg_offset) + { + u64 offset = req->reg_offset; + int blkaddr, num_lfs, lf; +@@ -663,6 +665,11 @@ static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req) + if (lf < 0) + return false; + ++ /* Translate local LF's offset to global CPT LF's offset to ++ * access LFX register. ++ */ ++ *reg_offset = (req->reg_offset & 0xFF000) + (lf << 3); ++ + return true; + } else if (!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK)) { + /* Registers that can be accessed from PF */ +@@ -697,7 +704,7 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu, + struct cpt_rd_wr_reg_msg *rsp) + { + u64 offset = req->reg_offset; +- int blkaddr, lf; ++ int blkaddr; + + blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr); + if (blkaddr < 0) +@@ -708,18 +715,10 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu, + !is_cpt_vf(rvu, req->hdr.pcifunc)) + return CPT_AF_ERR_ACCESS_DENIED; + +- if (!is_valid_offset(rvu, req)) ++ if (!validate_and_update_reg_offset(rvu, req, &offset)) + return CPT_AF_ERR_ACCESS_DENIED; + +- /* Translate local LF used by VFs to global CPT LF */ +- lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], req->hdr.pcifunc, +- (offset & 0xFFF) >> 3); +- +- /* Translate local LF's offset to global CPT LF's offset */ +- offset &= 0xFF000; +- offset += lf << 3; +- +- rsp->reg_offset = offset; ++ rsp->reg_offset = req->reg_offset; + rsp->ret_val = req->ret_val; + rsp->is_write = req->is_write; + +diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c +index c7196055c8c98..85a9ad2b86bff 100644 +--- a/drivers/net/ethernet/mediatek/mtk_wed.c ++++ b/drivers/net/ethernet/mediatek/mtk_wed.c +@@ -1762,14 +1762,15 @@ mtk_wed_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_pri + { + struct mtk_wed_flow_block_priv *priv = cb_priv; + struct flow_cls_offload *cls = type_data; +- struct mtk_wed_hw *hw = priv->hw; ++ struct mtk_wed_hw *hw = NULL; + +- if (!tc_can_offload(priv->dev)) ++ if (!priv || !tc_can_offload(priv->dev)) + return -EOPNOTSUPP; + + if (type != TC_SETUP_CLSFLOWER) + return -EOPNOTSUPP; + ++ hw = priv->hw; + return mtk_flow_offload_cmd(hw->eth, cls, hw->index); + } + +@@ -1825,6 +1826,7 @@ mtk_wed_setup_tc_block(struct mtk_wed_hw *hw, struct net_device 
*dev, + flow_block_cb_remove(block_cb, f); + list_del(&block_cb->driver_list); + kfree(block_cb->cb_priv); ++ block_cb->cb_priv = NULL; + } + return 0; + default: +diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c +index 071ed3dea860d..72bcdaed12a94 100644 +--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c ++++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c +@@ -68,6 +68,9 @@ mtk_wed_update_rx_stats(struct mtk_wed_device *wed, struct sk_buff *skb) + struct mtk_wed_wo_rx_stats *stats; + int i; + ++ if (!wed->wlan.update_wo_rx_stats) ++ return; ++ + if (count * sizeof(*stats) > skb->len - sizeof(u32)) + return; + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +index ff8242f67c545..51a23345caa18 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +@@ -149,7 +149,9 @@ static int mlx5e_tx_reporter_timeout_recover(void *ctx) + return err; + } + ++ mutex_lock(&priv->state_lock); + err = mlx5e_safe_reopen_channels(priv); ++ mutex_unlock(&priv->state_lock); + if (!err) { + to_ctx->status = 1; /* all channels recovered */ + return err; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +index 3eccdadc03578..773624bb2c5d5 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +@@ -734,7 +734,7 @@ mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv, + if (num_tuples <= 0) { + netdev_warn(priv->netdev, "%s: flow is not valid %d\n", + __func__, num_tuples); +- return num_tuples; ++ return num_tuples < 0 ? 
num_tuples : -EINVAL; + } + + eth_ft = get_flow_table(priv, fs, num_tuples); +diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h +index bc94e75a7aebd..e7777700ee18a 100644 +--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h ++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h +@@ -40,6 +40,7 @@ + */ + #define MLXBF_GIGE_BCAST_MAC_FILTER_IDX 0 + #define MLXBF_GIGE_LOCAL_MAC_FILTER_IDX 1 ++#define MLXBF_GIGE_MAX_FILTER_IDX 3 + + /* Define for broadcast MAC literal */ + #define BCAST_MAC_ADDR 0xFFFFFFFFFFFF +@@ -175,6 +176,13 @@ enum mlxbf_gige_res { + int mlxbf_gige_mdio_probe(struct platform_device *pdev, + struct mlxbf_gige *priv); + void mlxbf_gige_mdio_remove(struct mlxbf_gige *priv); ++ ++void mlxbf_gige_enable_multicast_rx(struct mlxbf_gige *priv); ++void mlxbf_gige_disable_multicast_rx(struct mlxbf_gige *priv); ++void mlxbf_gige_enable_mac_rx_filter(struct mlxbf_gige *priv, ++ unsigned int index); ++void mlxbf_gige_disable_mac_rx_filter(struct mlxbf_gige *priv, ++ unsigned int index); + void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv, + unsigned int index, u64 dmac); + void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv, +diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c +index 7bb92e2dacda6..57e68bfd3b1a8 100644 +--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c ++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c +@@ -168,6 +168,10 @@ static int mlxbf_gige_open(struct net_device *netdev) + if (err) + goto napi_deinit; + ++ mlxbf_gige_enable_mac_rx_filter(priv, MLXBF_GIGE_BCAST_MAC_FILTER_IDX); ++ mlxbf_gige_enable_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX); ++ mlxbf_gige_enable_multicast_rx(priv); ++ + /* Set bits in INT_EN that we care about */ + int_en = MLXBF_GIGE_INT_EN_HW_ACCESS_ERROR | + MLXBF_GIGE_INT_EN_TX_CHECKSUM_INPUTS | +@@ -379,6 +383,7 @@ static int mlxbf_gige_probe(struct platform_device *pdev) + void __iomem *plu_base; + void __iomem *base; + int addr, phy_irq; ++ unsigned int i; + int err; + + base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MAC); +@@ -423,6 +428,11 @@ static int mlxbf_gige_probe(struct platform_device *pdev) + priv->rx_q_entries = MLXBF_GIGE_DEFAULT_RXQ_SZ; + priv->tx_q_entries = MLXBF_GIGE_DEFAULT_TXQ_SZ; + ++ for (i = 0; i <= MLXBF_GIGE_MAX_FILTER_IDX; i++) ++ mlxbf_gige_disable_mac_rx_filter(priv, i); ++ mlxbf_gige_disable_multicast_rx(priv); ++ mlxbf_gige_disable_promisc(priv); ++ + /* Write initial MAC address to hardware */ + mlxbf_gige_initial_mac(priv); + +diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h +index cd0973229c9bb..74bd46bab4c05 100644 +--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h ++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h +@@ -62,6 +62,8 @@ + #define MLXBF_GIGE_TX_STATUS_DATA_FIFO_FULL BIT(1) + #define MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_START 0x0520 + #define MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_END 0x0528 ++#define MLXBF_GIGE_RX_MAC_FILTER_GENERAL 0x0530 ++#define MLXBF_GIGE_RX_MAC_FILTER_EN_MULTICAST BIT(1) + #define MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC 0x0540 + #define MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC_EN BIT(0) + #define MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS 0x0548 +diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c 
b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c +index 6999843584934..eb62620b63c7f 100644 +--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c ++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c +@@ -11,15 +11,31 @@ + #include "mlxbf_gige.h" + #include "mlxbf_gige_regs.h" + +-void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv, +- unsigned int index, u64 dmac) ++void mlxbf_gige_enable_multicast_rx(struct mlxbf_gige *priv) + { + void __iomem *base = priv->base; +- u64 control; ++ u64 data; + +- /* Write destination MAC to specified MAC RX filter */ +- writeq(dmac, base + MLXBF_GIGE_RX_MAC_FILTER + +- (index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE)); ++ data = readq(base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL); ++ data |= MLXBF_GIGE_RX_MAC_FILTER_EN_MULTICAST; ++ writeq(data, base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL); ++} ++ ++void mlxbf_gige_disable_multicast_rx(struct mlxbf_gige *priv) ++{ ++ void __iomem *base = priv->base; ++ u64 data; ++ ++ data = readq(base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL); ++ data &= ~MLXBF_GIGE_RX_MAC_FILTER_EN_MULTICAST; ++ writeq(data, base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL); ++} ++ ++void mlxbf_gige_enable_mac_rx_filter(struct mlxbf_gige *priv, ++ unsigned int index) ++{ ++ void __iomem *base = priv->base; ++ u64 control; + + /* Enable MAC receive filter mask for specified index */ + control = readq(base + MLXBF_GIGE_CONTROL); +@@ -27,6 +43,28 @@ void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv, + writeq(control, base + MLXBF_GIGE_CONTROL); + } + ++void mlxbf_gige_disable_mac_rx_filter(struct mlxbf_gige *priv, ++ unsigned int index) ++{ ++ void __iomem *base = priv->base; ++ u64 control; ++ ++ /* Disable MAC receive filter mask for specified index */ ++ control = readq(base + MLXBF_GIGE_CONTROL); ++ control &= ~(MLXBF_GIGE_CONTROL_EN_SPECIFIC_MAC << index); ++ writeq(control, base + MLXBF_GIGE_CONTROL); ++} ++ ++void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv, ++ unsigned int index, u64 dmac) ++{ ++ void __iomem *base = priv->base; ++ ++ /* Write destination MAC to specified MAC RX filter */ ++ writeq(dmac, base + MLXBF_GIGE_RX_MAC_FILTER + ++ (index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE)); ++} ++ + void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv, + unsigned int index, u64 *dmac) + { +diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c +index a09001d22b49c..765c5fc158f8f 100644 +--- a/drivers/net/ethernet/microsoft/mana/mana_en.c ++++ b/drivers/net/ethernet/microsoft/mana/mana_en.c +@@ -599,7 +599,11 @@ static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size, + else + *headroom = XDP_PACKET_HEADROOM; + +- *alloc_size = mtu + MANA_RXBUF_PAD + *headroom; ++ *alloc_size = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD + *headroom); ++ ++ /* Using page pool in this case, so alloc_size is PAGE_SIZE */ ++ if (*alloc_size < PAGE_SIZE) ++ *alloc_size = PAGE_SIZE; + + *datasize = mtu + ETH_HLEN; + } +@@ -1774,7 +1778,6 @@ static void mana_poll_rx_cq(struct mana_cq *cq) + static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue) + { + struct mana_cq *cq = context; +- u8 arm_bit; + int w; + + WARN_ON_ONCE(cq->gdma_cq != gdma_queue); +@@ -1785,16 +1788,23 @@ static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue) + mana_poll_tx_cq(cq); + + w = cq->work_done; +- +- if (w < cq->budget && +- napi_complete_done(&cq->napi, w)) { +- arm_bit = SET_ARM_BIT; +- } else { +- arm_bit = 0; ++ cq->work_done_since_doorbell += w; ++ ++ if (w < 
cq->budget) { ++ mana_gd_ring_cq(gdma_queue, SET_ARM_BIT); ++ cq->work_done_since_doorbell = 0; ++ napi_complete_done(&cq->napi, w); ++ } else if (cq->work_done_since_doorbell > ++ cq->gdma_cq->queue_size / COMP_ENTRY_SIZE * 4) { ++ /* MANA hardware requires at least one doorbell ring every 8 ++ * wraparounds of CQ even if there is no need to arm the CQ. ++ * This driver rings the doorbell as soon as we have exceeded ++ * 4 wraparounds. ++ */ ++ mana_gd_ring_cq(gdma_queue, 0); ++ cq->work_done_since_doorbell = 0; + } + +- mana_gd_ring_cq(gdma_queue, arm_bit); +- + return w; + } + +diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c +index 56ccbd4c37fe6..c2118bde908b1 100644 +--- a/drivers/net/ethernet/mscc/ocelot.c ++++ b/drivers/net/ethernet/mscc/ocelot.c +@@ -1099,6 +1099,48 @@ void ocelot_ptp_rx_timestamp(struct ocelot *ocelot, struct sk_buff *skb, + } + EXPORT_SYMBOL(ocelot_ptp_rx_timestamp); + ++void ocelot_lock_inj_grp(struct ocelot *ocelot, int grp) ++ __acquires(&ocelot->inj_lock) ++{ ++ spin_lock(&ocelot->inj_lock); ++} ++EXPORT_SYMBOL_GPL(ocelot_lock_inj_grp); ++ ++void ocelot_unlock_inj_grp(struct ocelot *ocelot, int grp) ++ __releases(&ocelot->inj_lock) ++{ ++ spin_unlock(&ocelot->inj_lock); ++} ++EXPORT_SYMBOL_GPL(ocelot_unlock_inj_grp); ++ ++void ocelot_lock_xtr_grp(struct ocelot *ocelot, int grp) ++ __acquires(&ocelot->inj_lock) ++{ ++ spin_lock(&ocelot->inj_lock); ++} ++EXPORT_SYMBOL_GPL(ocelot_lock_xtr_grp); ++ ++void ocelot_unlock_xtr_grp(struct ocelot *ocelot, int grp) ++ __releases(&ocelot->inj_lock) ++{ ++ spin_unlock(&ocelot->inj_lock); ++} ++EXPORT_SYMBOL_GPL(ocelot_unlock_xtr_grp); ++ ++void ocelot_lock_xtr_grp_bh(struct ocelot *ocelot, int grp) ++ __acquires(&ocelot->xtr_lock) ++{ ++ spin_lock_bh(&ocelot->xtr_lock); ++} ++EXPORT_SYMBOL_GPL(ocelot_lock_xtr_grp_bh); ++ ++void ocelot_unlock_xtr_grp_bh(struct ocelot *ocelot, int grp) ++ __releases(&ocelot->xtr_lock) ++{ ++ spin_unlock_bh(&ocelot->xtr_lock); ++} ++EXPORT_SYMBOL_GPL(ocelot_unlock_xtr_grp_bh); ++ + int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb) + { + u64 timestamp, src_port, len; +@@ -1109,6 +1151,8 @@ int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb) + u32 val, *buf; + int err; + ++ lockdep_assert_held(&ocelot->xtr_lock); ++ + err = ocelot_xtr_poll_xfh(ocelot, grp, xfh); + if (err) + return err; +@@ -1184,6 +1228,8 @@ bool ocelot_can_inject(struct ocelot *ocelot, int grp) + { + u32 val = ocelot_read(ocelot, QS_INJ_STATUS); + ++ lockdep_assert_held(&ocelot->inj_lock); ++ + if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp)))) + return false; + if (val & QS_INJ_STATUS_WMARK_REACHED(BIT(grp))) +@@ -1193,28 +1239,55 @@ bool ocelot_can_inject(struct ocelot *ocelot, int grp) + } + EXPORT_SYMBOL(ocelot_can_inject); + +-void ocelot_ifh_port_set(void *ifh, int port, u32 rew_op, u32 vlan_tag) ++/** ++ * ocelot_ifh_set_basic - Set basic information in Injection Frame Header ++ * @ifh: Pointer to Injection Frame Header memory ++ * @ocelot: Switch private data structure ++ * @port: Egress port number ++ * @rew_op: Egress rewriter operation for PTP ++ * @skb: Pointer to socket buffer (packet) ++ * ++ * Populate the Injection Frame Header with basic information for this skb: the ++ * analyzer bypass bit, destination port, VLAN info, egress rewriter info. 
++ */ ++void ocelot_ifh_set_basic(void *ifh, struct ocelot *ocelot, int port, ++ u32 rew_op, struct sk_buff *skb) + { ++ struct ocelot_port *ocelot_port = ocelot->ports[port]; ++ struct net_device *dev = skb->dev; ++ u64 vlan_tci, tag_type; ++ int qos_class; ++ ++ ocelot_xmit_get_vlan_info(skb, ocelot_port->bridge, &vlan_tci, ++ &tag_type); ++ ++ qos_class = netdev_get_num_tc(dev) ? ++ netdev_get_prio_tc_map(dev, skb->priority) : skb->priority; ++ ++ memset(ifh, 0, OCELOT_TAG_LEN); + ocelot_ifh_set_bypass(ifh, 1); ++ ocelot_ifh_set_src(ifh, BIT_ULL(ocelot->num_phys_ports)); + ocelot_ifh_set_dest(ifh, BIT_ULL(port)); +- ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C); +- if (vlan_tag) +- ocelot_ifh_set_vlan_tci(ifh, vlan_tag); ++ ocelot_ifh_set_qos_class(ifh, qos_class); ++ ocelot_ifh_set_tag_type(ifh, tag_type); ++ ocelot_ifh_set_vlan_tci(ifh, vlan_tci); + if (rew_op) + ocelot_ifh_set_rew_op(ifh, rew_op); + } +-EXPORT_SYMBOL(ocelot_ifh_port_set); ++EXPORT_SYMBOL(ocelot_ifh_set_basic); + + void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp, + u32 rew_op, struct sk_buff *skb) + { +- u32 ifh[OCELOT_TAG_LEN / 4] = {0}; ++ u32 ifh[OCELOT_TAG_LEN / 4]; + unsigned int i, count, last; + ++ lockdep_assert_held(&ocelot->inj_lock); ++ + ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) | + QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp); + +- ocelot_ifh_port_set(ifh, port, rew_op, skb_vlan_tag_get(skb)); ++ ocelot_ifh_set_basic(ifh, ocelot, port, rew_op, skb); + + for (i = 0; i < OCELOT_TAG_LEN / 4; i++) + ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp); +@@ -1247,6 +1320,8 @@ EXPORT_SYMBOL(ocelot_port_inject_frame); + + void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp) + { ++ lockdep_assert_held(&ocelot->xtr_lock); ++ + while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) + ocelot_read_rix(ocelot, QS_XTR_RD, grp); + } +@@ -2929,6 +3004,8 @@ int ocelot_init(struct ocelot *ocelot) + mutex_init(&ocelot->fwd_domain_lock); + spin_lock_init(&ocelot->ptp_clock_lock); + spin_lock_init(&ocelot->ts_id_lock); ++ spin_lock_init(&ocelot->inj_lock); ++ spin_lock_init(&ocelot->xtr_lock); + + ocelot->owq = alloc_ordered_workqueue("ocelot-owq", 0); + if (!ocelot->owq) +diff --git a/drivers/net/ethernet/mscc/ocelot_fdma.c b/drivers/net/ethernet/mscc/ocelot_fdma.c +index 312a468321544..00326ae8c708b 100644 +--- a/drivers/net/ethernet/mscc/ocelot_fdma.c ++++ b/drivers/net/ethernet/mscc/ocelot_fdma.c +@@ -665,8 +665,7 @@ static int ocelot_fdma_prepare_skb(struct ocelot *ocelot, int port, u32 rew_op, + + ifh = skb_push(skb, OCELOT_TAG_LEN); + skb_put(skb, ETH_FCS_LEN); +- memset(ifh, 0, OCELOT_TAG_LEN); +- ocelot_ifh_port_set(ifh, port, rew_op, skb_vlan_tag_get(skb)); ++ ocelot_ifh_set_basic(ifh, ocelot, port, rew_op, skb); + + return 0; + } +diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c +index 151b424653483..bc20bd1ef05c7 100644 +--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c ++++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c +@@ -51,6 +51,8 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg) + struct ocelot *ocelot = arg; + int grp = 0, err; + ++ ocelot_lock_xtr_grp(ocelot, grp); ++ + while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) { + struct sk_buff *skb; + +@@ -69,6 +71,8 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg) + if (err < 0) + ocelot_drain_cpu_queue(ocelot, 0); + ++ ocelot_unlock_xtr_grp(ocelot, grp); ++ + return IRQ_HANDLED; + } + +diff --git 
a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c +index fa4237c27e061..35099ad5eccc8 100644 +--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c ++++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c +@@ -215,9 +215,16 @@ static int ionic_sriov_configure(struct pci_dev *pdev, int num_vfs) + + static void ionic_clear_pci(struct ionic *ionic) + { ++ ionic->idev.dev_info_regs = NULL; ++ ionic->idev.dev_cmd_regs = NULL; ++ ionic->idev.intr_status = NULL; ++ ionic->idev.intr_ctrl = NULL; ++ + ionic_unmap_bars(ionic); + pci_release_regions(ionic->pdev); +- pci_disable_device(ionic->pdev); ++ ++ if (pci_is_enabled(ionic->pdev)) ++ pci_disable_device(ionic->pdev); + } + + static int ionic_setup_one(struct ionic *ionic) +diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c +index 22ab0a44fa8c7..e242166f0afe7 100644 +--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c ++++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c +@@ -165,9 +165,19 @@ void ionic_dev_teardown(struct ionic *ionic) + } + + /* Devcmd Interface */ +-bool ionic_is_fw_running(struct ionic_dev *idev) ++static bool __ionic_is_fw_running(struct ionic_dev *idev, u8 *status_ptr) + { +- u8 fw_status = ioread8(&idev->dev_info_regs->fw_status); ++ u8 fw_status; ++ ++ if (!idev->dev_info_regs) { ++ if (status_ptr) ++ *status_ptr = 0xff; ++ return false; ++ } ++ ++ fw_status = ioread8(&idev->dev_info_regs->fw_status); ++ if (status_ptr) ++ *status_ptr = fw_status; + + /* firmware is useful only if the running bit is set and + * fw_status != 0xff (bad PCI read) +@@ -175,6 +185,11 @@ bool ionic_is_fw_running(struct ionic_dev *idev) + return (fw_status != 0xff) && (fw_status & IONIC_FW_STS_F_RUNNING); + } + ++bool ionic_is_fw_running(struct ionic_dev *idev) ++{ ++ return __ionic_is_fw_running(idev, NULL); ++} ++ + int ionic_heartbeat_check(struct ionic *ionic) + { + unsigned long check_time, last_check_time; +@@ -199,10 +214,8 @@ int ionic_heartbeat_check(struct ionic *ionic) + goto do_check_time; + } + +- fw_status = ioread8(&idev->dev_info_regs->fw_status); +- + /* If fw_status is not ready don't bother with the generation */ +- if (!ionic_is_fw_running(idev)) { ++ if (!__ionic_is_fw_running(idev, &fw_status)) { + fw_status_ready = false; + } else { + fw_generation = fw_status & IONIC_FW_STS_F_GENERATION; +@@ -306,22 +319,32 @@ int ionic_heartbeat_check(struct ionic *ionic) + + u8 ionic_dev_cmd_status(struct ionic_dev *idev) + { ++ if (!idev->dev_cmd_regs) ++ return (u8)PCI_ERROR_RESPONSE; + return ioread8(&idev->dev_cmd_regs->comp.comp.status); + } + + bool ionic_dev_cmd_done(struct ionic_dev *idev) + { ++ if (!idev->dev_cmd_regs) ++ return false; + return ioread32(&idev->dev_cmd_regs->done) & IONIC_DEV_CMD_DONE; + } + + void ionic_dev_cmd_comp(struct ionic_dev *idev, union ionic_dev_cmd_comp *comp) + { ++ if (!idev->dev_cmd_regs) ++ return; + memcpy_fromio(comp, &idev->dev_cmd_regs->comp, sizeof(*comp)); + } + + void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd) + { + idev->opcode = cmd->cmd.opcode; ++ ++ if (!idev->dev_cmd_regs) ++ return; ++ + memcpy_toio(&idev->dev_cmd_regs->cmd, cmd, sizeof(*cmd)); + iowrite32(0, &idev->dev_cmd_regs->done); + iowrite32(1, &idev->dev_cmd_regs->doorbell); +diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c +index 3a6b0a9bc2414..35829a2851fa7 100644 +--- 
a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+@@ -90,18 +90,23 @@ static void ionic_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
+ 			   void *p)
+ {
+ 	struct ionic_lif *lif = netdev_priv(netdev);
++	struct ionic_dev *idev;
+ 	unsigned int offset;
+ 	unsigned int size;
+ 
+ 	regs->version = IONIC_DEV_CMD_REG_VERSION;
+ 
++	idev = &lif->ionic->idev;
++	if (!idev->dev_info_regs)
++		return;
++
+ 	offset = 0;
+ 	size = IONIC_DEV_INFO_REG_COUNT * sizeof(u32);
+ 	memcpy_fromio(p + offset, lif->ionic->idev.dev_info_regs->words, size);
+ 
+ 	offset += size;
+ 	size = IONIC_DEV_CMD_REG_COUNT * sizeof(u32);
+-	memcpy_fromio(p + offset, lif->ionic->idev.dev_cmd_regs->words, size);
++	memcpy_fromio(p + offset, idev->dev_cmd_regs->words, size);
+ }
+ 
+ static void ionic_get_link_ext_stats(struct net_device *netdev,
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_fw.c b/drivers/net/ethernet/pensando/ionic/ionic_fw.c
+index 5f40324cd243f..3c209c1a23373 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_fw.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_fw.c
+@@ -109,6 +109,11 @@ int ionic_firmware_update(struct ionic_lif *lif, const struct firmware *fw,
+ 	dl = priv_to_devlink(ionic);
+ 	devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0);
+ 
++	if (!idev->dev_cmd_regs) {
++		err = -ENXIO;
++		goto err_out;
++	}
++
+ 	buf_sz = sizeof(idev->dev_cmd_regs->data);
+ 
+ 	netdev_dbg(netdev,
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
+index f019277fec572..3ca6893d1bf26 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
+@@ -438,6 +438,9 @@ static void ionic_dev_cmd_clean(struct ionic *ionic)
+ {
+ 	struct ionic_dev *idev = &ionic->idev;
+ 
++	if (!idev->dev_cmd_regs)
++		return;
++
+ 	iowrite32(0, &idev->dev_cmd_regs->doorbell);
+ 	memset_io(&idev->dev_cmd_regs->cmd, 0, sizeof(idev->dev_cmd_regs->cmd));
+ }
+diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
+index 591f5b7b6da65..5007addd119aa 100644
+--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
++++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
+@@ -215,10 +215,14 @@ int ngbe_phy_connect(struct wx *wx)
+ {
+ 	int ret;
+ 
++	/* The MAC can only add the Tx delay, and that delay can not be
++	 * modified. So just disable the Tx delay in the PHY; it does not
++	 * matter for the internal phy. 
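++	 *
++	 * In phylib terms, PHY_INTERFACE_MODE_RGMII_RXID asks the PHY to
++	 * insert only the RX clock delay, leaving the TX delay to the MAC.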
++ */ + ret = phy_connect_direct(wx->netdev, + wx->phydev, + ngbe_handle_link_change, +- PHY_INTERFACE_MODE_RGMII_ID); ++ PHY_INTERFACE_MODE_RGMII_RXID); + if (ret) { + wx_err(wx, "PHY connect failed.\n"); + return ret; +diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h +index 575ff9de8985b..f09f10f17d7ea 100644 +--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h ++++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h +@@ -159,16 +159,17 @@ + #define XAE_RCW1_OFFSET 0x00000404 /* Rx Configuration Word 1 */ + #define XAE_TC_OFFSET 0x00000408 /* Tx Configuration */ + #define XAE_FCC_OFFSET 0x0000040C /* Flow Control Configuration */ +-#define XAE_EMMC_OFFSET 0x00000410 /* EMAC mode configuration */ +-#define XAE_PHYC_OFFSET 0x00000414 /* RGMII/SGMII configuration */ ++#define XAE_EMMC_OFFSET 0x00000410 /* MAC speed configuration */ ++#define XAE_PHYC_OFFSET 0x00000414 /* RX Max Frame Configuration */ + #define XAE_ID_OFFSET 0x000004F8 /* Identification register */ +-#define XAE_MDIO_MC_OFFSET 0x00000500 /* MII Management Config */ +-#define XAE_MDIO_MCR_OFFSET 0x00000504 /* MII Management Control */ +-#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MII Management Write Data */ +-#define XAE_MDIO_MRD_OFFSET 0x0000050C /* MII Management Read Data */ ++#define XAE_MDIO_MC_OFFSET 0x00000500 /* MDIO Setup */ ++#define XAE_MDIO_MCR_OFFSET 0x00000504 /* MDIO Control */ ++#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MDIO Write Data */ ++#define XAE_MDIO_MRD_OFFSET 0x0000050C /* MDIO Read Data */ + #define XAE_UAW0_OFFSET 0x00000700 /* Unicast address word 0 */ + #define XAE_UAW1_OFFSET 0x00000704 /* Unicast address word 1 */ +-#define XAE_FMI_OFFSET 0x00000708 /* Filter Mask Index */ ++#define XAE_FMI_OFFSET 0x00000708 /* Frame Filter Control */ ++#define XAE_FFE_OFFSET 0x0000070C /* Frame Filter Enable */ + #define XAE_AF0_OFFSET 0x00000710 /* Address Filter 0 */ + #define XAE_AF1_OFFSET 0x00000714 /* Address Filter 1 */ + +@@ -307,7 +308,7 @@ + */ + #define XAE_UAW1_UNICASTADDR_MASK 0x0000FFFF + +-/* Bit masks for Axi Ethernet FMI register */ ++/* Bit masks for Axi Ethernet FMC register */ + #define XAE_FMI_PM_MASK 0x80000000 /* Promis. 
mode enable */ + #define XAE_FMI_IND_MASK 0x00000003 /* Index Mask */ + +diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +index 11e08cb8d3c3e..144feb7a2fdac 100644 +--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c ++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +@@ -412,7 +412,7 @@ static int netdev_set_mac_address(struct net_device *ndev, void *p) + */ + static void axienet_set_multicast_list(struct net_device *ndev) + { +- int i; ++ int i = 0; + u32 reg, af0reg, af1reg; + struct axienet_local *lp = netdev_priv(ndev); + +@@ -430,7 +430,10 @@ static void axienet_set_multicast_list(struct net_device *ndev) + } else if (!netdev_mc_empty(ndev)) { + struct netdev_hw_addr *ha; + +- i = 0; ++ reg = axienet_ior(lp, XAE_FMI_OFFSET); ++ reg &= ~XAE_FMI_PM_MASK; ++ axienet_iow(lp, XAE_FMI_OFFSET, reg); ++ + netdev_for_each_mc_addr(ha, ndev) { + if (i >= XAE_MULTICAST_CAM_TABLE_NUM) + break; +@@ -449,6 +452,7 @@ static void axienet_set_multicast_list(struct net_device *ndev) + axienet_iow(lp, XAE_FMI_OFFSET, reg); + axienet_iow(lp, XAE_AF0_OFFSET, af0reg); + axienet_iow(lp, XAE_AF1_OFFSET, af1reg); ++ axienet_iow(lp, XAE_FFE_OFFSET, 1); + i++; + } + } else { +@@ -456,18 +460,15 @@ static void axienet_set_multicast_list(struct net_device *ndev) + reg &= ~XAE_FMI_PM_MASK; + + axienet_iow(lp, XAE_FMI_OFFSET, reg); +- +- for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) { +- reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00; +- reg |= i; +- +- axienet_iow(lp, XAE_FMI_OFFSET, reg); +- axienet_iow(lp, XAE_AF0_OFFSET, 0); +- axienet_iow(lp, XAE_AF1_OFFSET, 0); +- } +- + dev_info(&ndev->dev, "Promiscuous mode disabled.\n"); + } ++ ++ for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) { ++ reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00; ++ reg |= i; ++ axienet_iow(lp, XAE_FMI_OFFSET, reg); ++ axienet_iow(lp, XAE_FFE_OFFSET, 0); ++ } + } + + /** +diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c +index 63f932256c9f5..931b65591f4d1 100644 +--- a/drivers/net/gtp.c ++++ b/drivers/net/gtp.c +@@ -901,6 +901,9 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev) + if (skb_cow_head(skb, dev->needed_headroom)) + goto tx_err; + ++ if (!pskb_inet_may_pull(skb)) ++ goto tx_err; ++ + skb_reset_inner_headers(skb); + + /* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. 
*/ +diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c +index 33f2c189b4d86..4247c0f840a48 100644 +--- a/drivers/net/wireless/ath/ath11k/mac.c ++++ b/drivers/net/wireless/ath/ath11k/mac.c +@@ -8910,7 +8910,7 @@ static int ath11k_mac_op_remain_on_channel(struct ieee80211_hw *hw, + { + struct ath11k *ar = hw->priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); +- struct scan_req_params arg; ++ struct scan_req_params *arg; + int ret; + u32 scan_time_msec; + +@@ -8942,27 +8942,31 @@ static int ath11k_mac_op_remain_on_channel(struct ieee80211_hw *hw, + + scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2; + +- memset(&arg, 0, sizeof(arg)); +- ath11k_wmi_start_scan_init(ar, &arg); +- arg.num_chan = 1; +- arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list), +- GFP_KERNEL); +- if (!arg.chan_list) { ++ arg = kzalloc(sizeof(*arg), GFP_KERNEL); ++ if (!arg) { + ret = -ENOMEM; + goto exit; + } ++ ath11k_wmi_start_scan_init(ar, arg); ++ arg->num_chan = 1; ++ arg->chan_list = kcalloc(arg->num_chan, sizeof(*arg->chan_list), ++ GFP_KERNEL); ++ if (!arg->chan_list) { ++ ret = -ENOMEM; ++ goto free_arg; ++ } + +- arg.vdev_id = arvif->vdev_id; +- arg.scan_id = ATH11K_SCAN_ID; +- arg.chan_list[0] = chan->center_freq; +- arg.dwell_time_active = scan_time_msec; +- arg.dwell_time_passive = scan_time_msec; +- arg.max_scan_time = scan_time_msec; +- arg.scan_flags |= WMI_SCAN_FLAG_PASSIVE; +- arg.scan_flags |= WMI_SCAN_FILTER_PROBE_REQ; +- arg.burst_duration = duration; +- +- ret = ath11k_start_scan(ar, &arg); ++ arg->vdev_id = arvif->vdev_id; ++ arg->scan_id = ATH11K_SCAN_ID; ++ arg->chan_list[0] = chan->center_freq; ++ arg->dwell_time_active = scan_time_msec; ++ arg->dwell_time_passive = scan_time_msec; ++ arg->max_scan_time = scan_time_msec; ++ arg->scan_flags |= WMI_SCAN_FLAG_PASSIVE; ++ arg->scan_flags |= WMI_SCAN_FILTER_PROBE_REQ; ++ arg->burst_duration = duration; ++ ++ ret = ath11k_start_scan(ar, arg); + if (ret) { + ath11k_warn(ar->ab, "failed to start roc scan: %d\n", ret); + +@@ -8988,7 +8992,9 @@ static int ath11k_mac_op_remain_on_channel(struct ieee80211_hw *hw, + ret = 0; + + free_chan_list: +- kfree(arg.chan_list); ++ kfree(arg->chan_list); ++free_arg: ++ kfree(arg); + exit: + mutex_unlock(&ar->conf_mutex); + return ret; +diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c +index 61435e4489b9f..ba6fc27f4a1a1 100644 +--- a/drivers/net/wireless/ath/ath12k/mac.c ++++ b/drivers/net/wireless/ath/ath12k/mac.c +@@ -6039,13 +6039,28 @@ ath12k_mac_update_vif_chan(struct ath12k *ar, + if (WARN_ON(!arvif->is_started)) + continue; + +- if (WARN_ON(!arvif->is_up)) +- continue; ++ /* Firmware expect vdev_restart only if vdev is up. ++ * If vdev is down then it expect vdev_stop->vdev_start. 
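++	 * Roughly, the two paths below are (illustrative only):
++	 *
++	 *	vdev up:   ath12k_mac_vdev_restart(new chandef)
++	 *	vdev down: ath12k_mac_vdev_stop(), then
++	 *		   ath12k_mac_vdev_start(new chandef)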
++ */ ++ if (arvif->is_up) { ++ ret = ath12k_mac_vdev_restart(arvif, &vifs[i].new_ctx->def); ++ if (ret) { ++ ath12k_warn(ab, "failed to restart vdev %d: %d\n", ++ arvif->vdev_id, ret); ++ continue; ++ } ++ } else { ++ ret = ath12k_mac_vdev_stop(arvif); ++ if (ret) { ++ ath12k_warn(ab, "failed to stop vdev %d: %d\n", ++ arvif->vdev_id, ret); ++ continue; ++ } + +- ret = ath12k_mac_vdev_restart(arvif, &vifs[i].new_ctx->def); +- if (ret) { +- ath12k_warn(ab, "failed to restart vdev %d: %d\n", +- arvif->vdev_id, ret); ++ ret = ath12k_mac_vdev_start(arvif, &vifs[i].new_ctx->def); ++ if (ret) ++ ath12k_warn(ab, "failed to start vdev %d: %d\n", ++ arvif->vdev_id, ret); + continue; + } + +diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c +index e68accbc837f4..f1379a5e60cdd 100644 +--- a/drivers/net/wireless/ath/ath12k/qmi.c ++++ b/drivers/net/wireless/ath/ath12k/qmi.c +@@ -1977,6 +1977,7 @@ static int ath12k_qmi_host_cap_send(struct ath12k_base *ab) + QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN, + qmi_wlanfw_host_cap_req_msg_v01_ei, &req); + if (ret < 0) { ++ qmi_txn_cancel(&txn); + ath12k_warn(ab, "Failed to send host capability request,err = %d\n", ret); + goto out; + } +@@ -2040,6 +2041,7 @@ static int ath12k_qmi_fw_ind_register_send(struct ath12k_base *ab) + QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN, + qmi_wlanfw_ind_register_req_msg_v01_ei, req); + if (ret < 0) { ++ qmi_txn_cancel(&txn); + ath12k_warn(ab, "Failed to send indication register request, err = %d\n", + ret); + goto out; +@@ -2114,6 +2116,7 @@ static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab) + QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN, + qmi_wlanfw_respond_mem_req_msg_v01_ei, req); + if (ret < 0) { ++ qmi_txn_cancel(&txn); + ath12k_warn(ab, "qmi failed to respond memory request, err = %d\n", + ret); + goto out; +@@ -2228,6 +2231,7 @@ static int ath12k_qmi_request_target_cap(struct ath12k_base *ab) + QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN, + qmi_wlanfw_cap_req_msg_v01_ei, &req); + if (ret < 0) { ++ qmi_txn_cancel(&txn); + ath12k_warn(ab, "qmi failed to send target cap request, err = %d\n", + ret); + goto out; +@@ -2567,6 +2571,7 @@ static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab) + QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN, + qmi_wlanfw_m3_info_req_msg_v01_ei, &req); + if (ret < 0) { ++ qmi_txn_cancel(&txn); + ath12k_warn(ab, "qmi failed to send M3 information request, err = %d\n", + ret); + goto out; +@@ -2613,6 +2618,7 @@ static int ath12k_qmi_wlanfw_mode_send(struct ath12k_base *ab, + QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN, + qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req); + if (ret < 0) { ++ qmi_txn_cancel(&txn); + ath12k_warn(ab, "qmi failed to send mode request, mode: %d, err = %d\n", + mode, ret); + goto out; +@@ -2704,6 +2710,7 @@ static int ath12k_qmi_wlanfw_wlan_cfg_send(struct ath12k_base *ab) + QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN, + qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req); + if (ret < 0) { ++ qmi_txn_cancel(&txn); + ath12k_warn(ab, "qmi failed to send wlan config request, err = %d\n", + ret); + goto out; +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +index c230bc8900a5c..da4968e66725b 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +@@ -4321,9 +4321,16 @@ brcmf_pmksa_v3_op(struct brcmf_if *ifp, struct cfg80211_pmksa *pmksa, + /* Single PMK operation */ + 
pmk_op->count = cpu_to_le16(1); + length += sizeof(struct brcmf_pmksa_v3); +- memcpy(pmk_op->pmk[0].bssid, pmksa->bssid, ETH_ALEN); +- memcpy(pmk_op->pmk[0].pmkid, pmksa->pmkid, WLAN_PMKID_LEN); +- pmk_op->pmk[0].pmkid_len = WLAN_PMKID_LEN; ++ if (pmksa->bssid) ++ memcpy(pmk_op->pmk[0].bssid, pmksa->bssid, ETH_ALEN); ++ if (pmksa->pmkid) { ++ memcpy(pmk_op->pmk[0].pmkid, pmksa->pmkid, WLAN_PMKID_LEN); ++ pmk_op->pmk[0].pmkid_len = WLAN_PMKID_LEN; ++ } ++ if (pmksa->ssid && pmksa->ssid_len) { ++ memcpy(pmk_op->pmk[0].ssid.SSID, pmksa->ssid, pmksa->ssid_len); ++ pmk_op->pmk[0].ssid.SSID_len = pmksa->ssid_len; ++ } + pmk_op->pmk[0].time_left = cpu_to_le32(alive ? BRCMF_PMKSA_NO_EXPIRY : 0); + } + +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c +index 3cdbc6ac7ae5d..3356e36e2af73 100644 +--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c ++++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c +@@ -141,7 +141,11 @@ static int iwl_dbgfs_enabled_severities_write(struct iwl_fw_runtime *fwrt, + + event_cfg.enabled_severities = cpu_to_le32(enabled_severities); + +- ret = iwl_trans_send_cmd(fwrt->trans, &hcmd); ++ if (fwrt->ops && fwrt->ops->send_hcmd) ++ ret = fwrt->ops->send_hcmd(fwrt->ops_ctx, &hcmd); ++ else ++ ret = -EPERM; ++ + IWL_INFO(fwrt, + "sent host event cfg with enabled_severities: %u, ret: %d\n", + enabled_severities, ret); +diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +index a56593b6135f6..47bea1855e8c8 100644 +--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c ++++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +@@ -1304,10 +1304,12 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, + case IWL_UCODE_TLV_CURRENT_PC: + if (tlv_len < sizeof(struct iwl_pc_data)) + goto invalid_tlv_len; +- drv->trans->dbg.num_pc = +- tlv_len / sizeof(struct iwl_pc_data); + drv->trans->dbg.pc_data = + kmemdup(tlv_data, tlv_len, GFP_KERNEL); ++ if (!drv->trans->dbg.pc_data) ++ return -ENOMEM; ++ drv->trans->dbg.num_pc = ++ tlv_len / sizeof(struct iwl_pc_data); + break; + default: + IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type); +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +index 9c89f0dd69c86..08d1fab7f53c3 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +@@ -1849,9 +1849,12 @@ iwl_mvm_d3_set_igtk_bigtk_ipn(const struct iwl_multicast_key_data *key, + memcpy(seq->aes_gmac.pn, key->ipn, sizeof(seq->aes_gmac.pn)); + break; + case WLAN_CIPHER_SUITE_BIP_CMAC_256: ++ case WLAN_CIPHER_SUITE_AES_CMAC: + BUILD_BUG_ON(sizeof(seq->aes_cmac.pn) != sizeof(key->ipn)); + memcpy(seq->aes_cmac.pn, key->ipn, sizeof(seq->aes_cmac.pn)); + break; ++ default: ++ WARN_ON(1); + } + } + +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +index 8f49de1206e03..f973efbbc3795 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +@@ -1035,6 +1035,7 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, + spin_unlock_bh(&mvm->time_event_lock); + + memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data)); ++ mvmvif->ap_sta = NULL; + + for_each_mvm_vif_valid_link(mvmvif, link_id) { + mvmvif->link[link_id]->ap_sta_id = IWL_MVM_INVALID_STA; +@@ -3934,7 +3935,11 @@ int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw, + + mutex_lock(&mvm->mutex); + +- 
/* this would be a mac80211 bug ... but don't crash */ ++ /* this would be a mac80211 bug ... but don't crash, unless we had a ++ * firmware crash while we were activating a link, in which case it is ++ * legit to have phy_ctxt = NULL. Don't bother not to WARN if we are in ++ * recovery flow since we spit tons of error messages anyway. ++ */ + for_each_sta_active_link(vif, sta, link_sta, link_id) { + if (WARN_ON_ONCE(!mvmvif->link[link_id]->phy_ctxt)) { + mutex_unlock(&mvm->mutex); +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +index 6ef932e6299da..9ca90c0806c0f 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +@@ -3413,7 +3413,7 @@ int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify) + if (!(mvm->scan_status & type)) + return 0; + +- if (iwl_mvm_is_radio_killed(mvm)) { ++ if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) { + ret = 0; + goto out; + } +diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c +index dbab400969202..85bffcf4f6fbf 100644 +--- a/drivers/net/wireless/mediatek/mt76/mac80211.c ++++ b/drivers/net/wireless/mediatek/mt76/mac80211.c +@@ -415,6 +415,9 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw) + struct mt76_dev *dev = phy->dev; + struct wiphy *wiphy = hw->wiphy; + ++ INIT_LIST_HEAD(&phy->tx_list); ++ spin_lock_init(&phy->tx_lock); ++ + SET_IEEE80211_DEV(hw, dev->dev); + SET_IEEE80211_PERM_ADDR(hw, phy->macaddr); + +@@ -688,6 +691,7 @@ int mt76_register_device(struct mt76_dev *dev, bool vht, + int ret; + + dev_set_drvdata(dev->dev, dev); ++ mt76_wcid_init(&dev->global_wcid); + ret = mt76_phy_init(phy, hw); + if (ret) + return ret; +@@ -743,6 +747,7 @@ void mt76_unregister_device(struct mt76_dev *dev) + if (IS_ENABLED(CONFIG_MT76_LEDS)) + mt76_led_cleanup(&dev->phy); + mt76_tx_status_check(dev, true); ++ mt76_wcid_cleanup(dev, &dev->global_wcid); + ieee80211_unregister_hw(hw); + } + EXPORT_SYMBOL_GPL(mt76_unregister_device); +@@ -1411,7 +1416,7 @@ mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif, + wcid->phy_idx = phy->band_idx; + rcu_assign_pointer(dev->wcid[wcid->idx], wcid); + +- mt76_packet_id_init(wcid); ++ mt76_wcid_init(wcid); + out: + mutex_unlock(&dev->mutex); + +@@ -1430,7 +1435,7 @@ void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif, + if (dev->drv->sta_remove) + dev->drv->sta_remove(dev, vif, sta); + +- mt76_packet_id_flush(dev, wcid); ++ mt76_wcid_cleanup(dev, wcid); + + mt76_wcid_mask_clear(dev->wcid_mask, idx); + mt76_wcid_mask_clear(dev->wcid_phy_mask, idx); +@@ -1486,6 +1491,47 @@ void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + } + EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove); + ++void mt76_wcid_init(struct mt76_wcid *wcid) ++{ ++ INIT_LIST_HEAD(&wcid->tx_list); ++ skb_queue_head_init(&wcid->tx_pending); ++ ++ INIT_LIST_HEAD(&wcid->list); ++ idr_init(&wcid->pktid); ++} ++EXPORT_SYMBOL_GPL(mt76_wcid_init); ++ ++void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid) ++{ ++ struct mt76_phy *phy = dev->phys[wcid->phy_idx]; ++ struct ieee80211_hw *hw; ++ struct sk_buff_head list; ++ struct sk_buff *skb; ++ ++ mt76_tx_status_lock(dev, &list); ++ mt76_tx_status_skb_get(dev, wcid, -1, &list); ++ mt76_tx_status_unlock(dev, &list); ++ ++ idr_destroy(&wcid->pktid); ++ ++ spin_lock_bh(&phy->tx_lock); ++ ++ if (!list_empty(&wcid->tx_list)) ++ list_del_init(&wcid->tx_list); ++ 
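++	/* Lock nesting: phy->tx_lock (outer, BH disabled) protects the
++	 * per-phy wcid tx_list; wcid->tx_pending.lock (inner) protects the
++	 * queue of pending skbs spliced out here for freeing.
++	 */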
++ spin_lock(&wcid->tx_pending.lock); ++ skb_queue_splice_tail_init(&wcid->tx_pending, &list); ++ spin_unlock(&wcid->tx_pending.lock); ++ ++ spin_unlock_bh(&phy->tx_lock); ++ ++ while ((skb = __skb_dequeue(&list)) != NULL) { ++ hw = mt76_tx_status_get_hw(dev, skb); ++ ieee80211_free_txskb(hw, skb); ++ } ++} ++EXPORT_SYMBOL_GPL(mt76_wcid_cleanup); ++ + int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + int *dbm) + { +diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h +index 7f44736ca26f0..8b620d4fed439 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt76.h ++++ b/drivers/net/wireless/mediatek/mt76/mt76.h +@@ -334,6 +334,9 @@ struct mt76_wcid { + u32 tx_info; + bool sw_iv; + ++ struct list_head tx_list; ++ struct sk_buff_head tx_pending; ++ + struct list_head list; + struct idr pktid; + +@@ -719,6 +722,8 @@ struct mt76_phy { + unsigned long state; + u8 band_idx; + ++ spinlock_t tx_lock; ++ struct list_head tx_list; + struct mt76_queue *q_tx[__MT_TXQ_MAX]; + + struct cfg80211_chan_def chandef; +@@ -1599,22 +1604,7 @@ mt76_token_put(struct mt76_dev *dev, int token) + return txwi; + } + +-static inline void mt76_packet_id_init(struct mt76_wcid *wcid) +-{ +- INIT_LIST_HEAD(&wcid->list); +- idr_init(&wcid->pktid); +-} +- +-static inline void +-mt76_packet_id_flush(struct mt76_dev *dev, struct mt76_wcid *wcid) +-{ +- struct sk_buff_head list; +- +- mt76_tx_status_lock(dev, &list); +- mt76_tx_status_skb_get(dev, wcid, -1, &list); +- mt76_tx_status_unlock(dev, &list); +- +- idr_destroy(&wcid->pktid); +-} ++void mt76_wcid_init(struct mt76_wcid *wcid); ++void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid); + + #endif +diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c +index c213fd2a5216b..89d738deea62e 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c ++++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c +@@ -70,7 +70,7 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) + mvif->sta.wcid.idx = idx; + mvif->sta.wcid.hw_key_idx = -1; + mvif->sta.vif = mvif; +- mt76_packet_id_init(&mvif->sta.wcid); ++ mt76_wcid_init(&mvif->sta.wcid); + + eth_broadcast_addr(bc_addr); + mt7603_wtbl_init(dev, idx, mvif->idx, bc_addr); +@@ -110,7 +110,7 @@ mt7603_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) + dev->mt76.vif_mask &= ~BIT_ULL(mvif->idx); + mutex_unlock(&dev->mt76.mutex); + +- mt76_packet_id_flush(&dev->mt76, &mvif->sta.wcid); ++ mt76_wcid_cleanup(&dev->mt76, &mvif->sta.wcid); + } + + void mt7603_init_edcca(struct mt7603_dev *dev) +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c +index 200b1752ca77f..dab16b5fc3861 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c +@@ -226,7 +226,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw, + mvif->sta.wcid.idx = idx; + mvif->sta.wcid.phy_idx = mvif->mt76.band_idx; + mvif->sta.wcid.hw_key_idx = -1; +- mt76_packet_id_init(&mvif->sta.wcid); ++ mt76_wcid_init(&mvif->sta.wcid); + + mt7615_mac_wtbl_update(dev, idx, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); +@@ -279,7 +279,7 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw, + list_del_init(&msta->wcid.poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); + +- mt76_packet_id_flush(&dev->mt76, &mvif->sta.wcid); ++ mt76_wcid_cleanup(&dev->mt76, &mvif->sta.wcid); + } + + 
int mt7615_set_channel(struct mt7615_phy *phy) +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c +index dcbb5c605dfe6..8a0e8124b8940 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c ++++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c +@@ -288,7 +288,7 @@ mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif, + mvif->idx = idx; + mvif->group_wcid.idx = MT_VIF_WCID(idx); + mvif->group_wcid.hw_key_idx = -1; +- mt76_packet_id_init(&mvif->group_wcid); ++ mt76_wcid_init(&mvif->group_wcid); + + mtxq = (struct mt76_txq *)vif->txq->drv_priv; + rcu_assign_pointer(dev->mt76.wcid[MT_VIF_WCID(idx)], &mvif->group_wcid); +@@ -346,7 +346,7 @@ void mt76x02_remove_interface(struct ieee80211_hw *hw, + + dev->mt76.vif_mask &= ~BIT_ULL(mvif->idx); + rcu_assign_pointer(dev->mt76.wcid[mvif->group_wcid.idx], NULL); +- mt76_packet_id_flush(&dev->mt76, &mvif->group_wcid); ++ mt76_wcid_cleanup(&dev->mt76, &mvif->group_wcid); + } + EXPORT_SYMBOL_GPL(mt76x02_remove_interface); + +diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c +index 3196f56cdf4ab..260fe00d7dc6d 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c ++++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c +@@ -253,7 +253,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw, + mvif->sta.wcid.phy_idx = ext_phy; + mvif->sta.wcid.hw_key_idx = -1; + mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET; +- mt76_packet_id_init(&mvif->sta.wcid); ++ mt76_wcid_init(&mvif->sta.wcid); + + mt7915_mac_wtbl_update(dev, idx, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); +@@ -314,7 +314,7 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw, + list_del_init(&msta->wcid.poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); + +- mt76_packet_id_flush(&dev->mt76, &msta->wcid); ++ mt76_wcid_cleanup(&dev->mt76, &msta->wcid); + } + + int mt7915_set_channel(struct mt7915_phy *phy) +diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c +index d8851cb5f400b..6a5c2cae087d0 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c ++++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c +@@ -318,7 +318,7 @@ mt7921_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) + mvif->sta.wcid.phy_idx = mvif->mt76.band_idx; + mvif->sta.wcid.hw_key_idx = -1; + mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET; +- mt76_packet_id_init(&mvif->sta.wcid); ++ mt76_wcid_init(&mvif->sta.wcid); + + mt7921_mac_wtbl_update(dev, idx, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); +diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c +index 2fb1141e5fa96..66806ed4f942d 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c ++++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c +@@ -115,7 +115,7 @@ void mt792x_remove_interface(struct ieee80211_hw *hw, + list_del_init(&msta->wcid.poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); + +- mt76_packet_id_flush(&dev->mt76, &msta->wcid); ++ mt76_wcid_cleanup(&dev->mt76, &msta->wcid); + } + EXPORT_SYMBOL_GPL(mt792x_remove_interface); + +diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c +index 620880e560e00..7fea9f0d409bf 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c ++++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c +@@ -207,7 +207,7 @@ static int mt7996_add_interface(struct 
ieee80211_hw *hw, + mvif->sta.wcid.phy_idx = band_idx; + mvif->sta.wcid.hw_key_idx = -1; + mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET; +- mt76_packet_id_init(&mvif->sta.wcid); ++ mt76_wcid_init(&mvif->sta.wcid); + + mt7996_mac_wtbl_update(dev, idx, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); +@@ -268,7 +268,7 @@ static void mt7996_remove_interface(struct ieee80211_hw *hw, + list_del_init(&msta->wcid.poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); + +- mt76_packet_id_flush(&dev->mt76, &msta->wcid); ++ mt76_wcid_cleanup(&dev->mt76, &msta->wcid); + } + + int mt7996_set_channel(struct mt7996_phy *phy) +diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c +index 6cc26cc6c5178..1809b03292c3d 100644 +--- a/drivers/net/wireless/mediatek/mt76/tx.c ++++ b/drivers/net/wireless/mediatek/mt76/tx.c +@@ -329,40 +329,32 @@ void + mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta, + struct mt76_wcid *wcid, struct sk_buff *skb) + { +- struct mt76_dev *dev = phy->dev; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); +- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; +- struct mt76_queue *q; +- int qid = skb_get_queue_mapping(skb); + + if (mt76_testmode_enabled(phy)) { + ieee80211_free_txskb(phy->hw, skb); + return; + } + +- if (WARN_ON(qid >= MT_TXQ_PSD)) { +- qid = MT_TXQ_BE; +- skb_set_queue_mapping(skb, qid); +- } +- +- if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) && +- !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) && +- !ieee80211_is_data(hdr->frame_control) && +- !ieee80211_is_bufferable_mmpdu(skb)) { +- qid = MT_TXQ_PSD; +- } ++ if (WARN_ON(skb_get_queue_mapping(skb) >= MT_TXQ_PSD)) ++ skb_set_queue_mapping(skb, MT_TXQ_BE); + + if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET)) + ieee80211_get_tx_rates(info->control.vif, sta, skb, + info->control.rates, 1); + + info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx); +- q = phy->q_tx[qid]; + +- spin_lock_bh(&q->lock); +- __mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL); +- dev->queue_ops->kick(dev, q); +- spin_unlock_bh(&q->lock); ++ spin_lock_bh(&wcid->tx_pending.lock); ++ __skb_queue_tail(&wcid->tx_pending, skb); ++ spin_unlock_bh(&wcid->tx_pending.lock); ++ ++ spin_lock_bh(&phy->tx_lock); ++ if (list_empty(&wcid->tx_list)) ++ list_add_tail(&wcid->tx_list, &phy->tx_list); ++ spin_unlock_bh(&phy->tx_lock); ++ ++ mt76_worker_schedule(&phy->dev->tx_worker); + } + EXPORT_SYMBOL_GPL(mt76_tx); + +@@ -593,10 +585,86 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid) + } + EXPORT_SYMBOL_GPL(mt76_txq_schedule); + ++static int ++mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid) ++{ ++ struct mt76_dev *dev = phy->dev; ++ struct ieee80211_sta *sta; ++ struct mt76_queue *q; ++ struct sk_buff *skb; ++ int ret = 0; ++ ++ spin_lock(&wcid->tx_pending.lock); ++ while ((skb = skb_peek(&wcid->tx_pending)) != NULL) { ++ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; ++ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); ++ int qid = skb_get_queue_mapping(skb); ++ ++ if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) && ++ !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) && ++ !ieee80211_is_data(hdr->frame_control) && ++ !ieee80211_is_bufferable_mmpdu(skb)) ++ qid = MT_TXQ_PSD; ++ ++ q = phy->q_tx[qid]; ++ if (mt76_txq_stopped(q)) { ++ ret = -1; ++ break; ++ } ++ ++ __skb_unlink(skb, &wcid->tx_pending); ++ spin_unlock(&wcid->tx_pending.lock); ++ ++ sta = wcid_to_sta(wcid); ++ spin_lock(&q->lock); ++ 
__mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL); ++ dev->queue_ops->kick(dev, q); ++ spin_unlock(&q->lock); ++ ++ spin_lock(&wcid->tx_pending.lock); ++ } ++ spin_unlock(&wcid->tx_pending.lock); ++ ++ return ret; ++} ++ ++static void mt76_txq_schedule_pending(struct mt76_phy *phy) ++{ ++ if (list_empty(&phy->tx_list)) ++ return; ++ ++ local_bh_disable(); ++ rcu_read_lock(); ++ ++ spin_lock(&phy->tx_lock); ++ while (!list_empty(&phy->tx_list)) { ++ struct mt76_wcid *wcid = NULL; ++ int ret; ++ ++ wcid = list_first_entry(&phy->tx_list, struct mt76_wcid, tx_list); ++ list_del_init(&wcid->tx_list); ++ ++ spin_unlock(&phy->tx_lock); ++ ret = mt76_txq_schedule_pending_wcid(phy, wcid); ++ spin_lock(&phy->tx_lock); ++ ++ if (ret) { ++ if (list_empty(&wcid->tx_list)) ++ list_add_tail(&wcid->tx_list, &phy->tx_list); ++ break; ++ } ++ } ++ spin_unlock(&phy->tx_lock); ++ ++ rcu_read_unlock(); ++ local_bh_enable(); ++} ++ + void mt76_txq_schedule_all(struct mt76_phy *phy) + { + int i; + ++ mt76_txq_schedule_pending(phy); + for (i = 0; i <= MT_TXQ_BK; i++) + mt76_txq_schedule(phy, i); + } +diff --git a/drivers/net/wireless/st/cw1200/txrx.c b/drivers/net/wireless/st/cw1200/txrx.c +index 6894b919ff94b..e16e9ae90d204 100644 +--- a/drivers/net/wireless/st/cw1200/txrx.c ++++ b/drivers/net/wireless/st/cw1200/txrx.c +@@ -1166,7 +1166,7 @@ void cw1200_rx_cb(struct cw1200_common *priv, + size_t ies_len = skb->len - (ies - (u8 *)(skb->data)); + + tim_ie = cfg80211_find_ie(WLAN_EID_TIM, ies, ies_len); +- if (tim_ie) { ++ if (tim_ie && tim_ie[1] >= sizeof(struct ieee80211_tim_ie)) { + struct ieee80211_tim_ie *tim = + (struct ieee80211_tim_ie *)&tim_ie[2]; + +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c +index e969da0a681b4..82509f3679373 100644 +--- a/drivers/nvme/host/core.c ++++ b/drivers/nvme/host/core.c +@@ -632,7 +632,7 @@ static void nvme_free_ns(struct kref *kref) + kfree(ns); + } + +-static inline bool nvme_get_ns(struct nvme_ns *ns) ++bool nvme_get_ns(struct nvme_ns *ns) + { + return kref_get_unless_zero(&ns->kref); + } +@@ -1313,8 +1313,10 @@ static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id) + + error = nvme_submit_sync_cmd(dev->admin_q, &c, *id, + sizeof(struct nvme_id_ctrl)); +- if (error) ++ if (error) { + kfree(*id); ++ *id = NULL; ++ } + return error; + } + +@@ -1443,6 +1445,7 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid, + if (error) { + dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error); + kfree(*id); ++ *id = NULL; + } + return error; + } +@@ -3539,9 +3542,10 @@ static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info) + struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) + { + struct nvme_ns *ns, *ret = NULL; ++ int srcu_idx; + +- down_read(&ctrl->namespaces_rwsem); +- list_for_each_entry(ns, &ctrl->namespaces, list) { ++ srcu_idx = srcu_read_lock(&ctrl->srcu); ++ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) { + if (ns->head->ns_id == nsid) { + if (!nvme_get_ns(ns)) + continue; +@@ -3551,7 +3555,7 @@ struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) + if (ns->head->ns_id > nsid) + break; + } +- up_read(&ctrl->namespaces_rwsem); ++ srcu_read_unlock(&ctrl->srcu, srcu_idx); + return ret; + } + EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU); +@@ -3565,7 +3569,7 @@ static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns) + + list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) { + if (tmp->head->ns_id < ns->head->ns_id) { 
+- list_add(&ns->list, &tmp->list); ++ list_add_rcu(&ns->list, &tmp->list); + return; + } + } +@@ -3631,17 +3635,18 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) + if (nvme_update_ns_info(ns, info)) + goto out_unlink_ns; + +- down_write(&ctrl->namespaces_rwsem); ++ mutex_lock(&ctrl->namespaces_lock); + /* + * Ensure that no namespaces are added to the ctrl list after the queues + * are frozen, thereby avoiding a deadlock between scan and reset. + */ + if (test_bit(NVME_CTRL_FROZEN, &ctrl->flags)) { +- up_write(&ctrl->namespaces_rwsem); ++ mutex_unlock(&ctrl->namespaces_lock); + goto out_unlink_ns; + } + nvme_ns_add_to_ctrl_list(ns); +- up_write(&ctrl->namespaces_rwsem); ++ mutex_unlock(&ctrl->namespaces_lock); ++ synchronize_srcu(&ctrl->srcu); + nvme_get_ctrl(ctrl); + + if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups)) +@@ -3657,9 +3662,10 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) + + out_cleanup_ns_from_list: + nvme_put_ctrl(ctrl); +- down_write(&ctrl->namespaces_rwsem); +- list_del_init(&ns->list); +- up_write(&ctrl->namespaces_rwsem); ++ mutex_lock(&ctrl->namespaces_lock); ++ list_del_rcu(&ns->list); ++ mutex_unlock(&ctrl->namespaces_lock); ++ synchronize_srcu(&ctrl->srcu); + out_unlink_ns: + mutex_lock(&ctrl->subsys->lock); + list_del_rcu(&ns->siblings); +@@ -3709,9 +3715,10 @@ static void nvme_ns_remove(struct nvme_ns *ns) + nvme_cdev_del(&ns->cdev, &ns->cdev_device); + del_gendisk(ns->disk); + +- down_write(&ns->ctrl->namespaces_rwsem); +- list_del_init(&ns->list); +- up_write(&ns->ctrl->namespaces_rwsem); ++ mutex_lock(&ns->ctrl->namespaces_lock); ++ list_del_rcu(&ns->list); ++ mutex_unlock(&ns->ctrl->namespaces_lock); ++ synchronize_srcu(&ns->ctrl->srcu); + + if (last_path) + nvme_mpath_shutdown_disk(ns->head); +@@ -3801,16 +3808,18 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, + struct nvme_ns *ns, *next; + LIST_HEAD(rm_list); + +- down_write(&ctrl->namespaces_rwsem); ++ mutex_lock(&ctrl->namespaces_lock); + list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { +- if (ns->head->ns_id > nsid) +- list_move_tail(&ns->list, &rm_list); ++ if (ns->head->ns_id > nsid) { ++ list_del_rcu(&ns->list); ++ synchronize_srcu(&ctrl->srcu); ++ list_add_tail_rcu(&ns->list, &rm_list); ++ } + } +- up_write(&ctrl->namespaces_rwsem); ++ mutex_unlock(&ctrl->namespaces_lock); + + list_for_each_entry_safe(ns, next, &rm_list, list) + nvme_ns_remove(ns); +- + } + + static int nvme_scan_ns_list(struct nvme_ctrl *ctrl) +@@ -3980,9 +3989,10 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl) + /* this is a no-op when called from the controller reset handler */ + nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO); + +- down_write(&ctrl->namespaces_rwsem); +- list_splice_init(&ctrl->namespaces, &ns_list); +- up_write(&ctrl->namespaces_rwsem); ++ mutex_lock(&ctrl->namespaces_lock); ++ list_splice_init_rcu(&ctrl->namespaces, &ns_list, synchronize_rcu); ++ mutex_unlock(&ctrl->namespaces_lock); ++ synchronize_srcu(&ctrl->srcu); + + list_for_each_entry_safe(ns, next, &ns_list, list) + nvme_ns_remove(ns); +@@ -4415,6 +4425,7 @@ static void nvme_free_ctrl(struct device *dev) + + nvme_free_cels(ctrl); + nvme_mpath_uninit(ctrl); ++ cleanup_srcu_struct(&ctrl->srcu); + nvme_auth_stop(ctrl); + nvme_auth_free(ctrl); + __free_page(ctrl->discard_page); +@@ -4446,10 +4457,15 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, + WRITE_ONCE(ctrl->state, NVME_CTRL_NEW); + 
clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags); + spin_lock_init(&ctrl->lock); ++ mutex_init(&ctrl->namespaces_lock); ++ ++ ret = init_srcu_struct(&ctrl->srcu); ++ if (ret) ++ return ret; ++ + mutex_init(&ctrl->scan_lock); + INIT_LIST_HEAD(&ctrl->namespaces); + xa_init(&ctrl->cels); +- init_rwsem(&ctrl->namespaces_rwsem); + ctrl->dev = dev; + ctrl->ops = ops; + ctrl->quirks = quirks; +@@ -4528,6 +4544,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, + out: + if (ctrl->discard_page) + __free_page(ctrl->discard_page); ++ cleanup_srcu_struct(&ctrl->srcu); + return ret; + } + EXPORT_SYMBOL_GPL(nvme_init_ctrl); +@@ -4536,22 +4553,24 @@ EXPORT_SYMBOL_GPL(nvme_init_ctrl); + void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl) + { + struct nvme_ns *ns; ++ int srcu_idx; + +- down_read(&ctrl->namespaces_rwsem); +- list_for_each_entry(ns, &ctrl->namespaces, list) ++ srcu_idx = srcu_read_lock(&ctrl->srcu); ++ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) + blk_mark_disk_dead(ns->disk); +- up_read(&ctrl->namespaces_rwsem); ++ srcu_read_unlock(&ctrl->srcu, srcu_idx); + } + EXPORT_SYMBOL_GPL(nvme_mark_namespaces_dead); + + void nvme_unfreeze(struct nvme_ctrl *ctrl) + { + struct nvme_ns *ns; ++ int srcu_idx; + +- down_read(&ctrl->namespaces_rwsem); +- list_for_each_entry(ns, &ctrl->namespaces, list) ++ srcu_idx = srcu_read_lock(&ctrl->srcu); ++ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) + blk_mq_unfreeze_queue(ns->queue); +- up_read(&ctrl->namespaces_rwsem); ++ srcu_read_unlock(&ctrl->srcu, srcu_idx); + clear_bit(NVME_CTRL_FROZEN, &ctrl->flags); + } + EXPORT_SYMBOL_GPL(nvme_unfreeze); +@@ -4559,14 +4578,15 @@ EXPORT_SYMBOL_GPL(nvme_unfreeze); + int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) + { + struct nvme_ns *ns; ++ int srcu_idx; + +- down_read(&ctrl->namespaces_rwsem); +- list_for_each_entry(ns, &ctrl->namespaces, list) { ++ srcu_idx = srcu_read_lock(&ctrl->srcu); ++ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) { + timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout); + if (timeout <= 0) + break; + } +- up_read(&ctrl->namespaces_rwsem); ++ srcu_read_unlock(&ctrl->srcu, srcu_idx); + return timeout; + } + EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout); +@@ -4574,23 +4594,25 @@ EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout); + void nvme_wait_freeze(struct nvme_ctrl *ctrl) + { + struct nvme_ns *ns; ++ int srcu_idx; + +- down_read(&ctrl->namespaces_rwsem); +- list_for_each_entry(ns, &ctrl->namespaces, list) ++ srcu_idx = srcu_read_lock(&ctrl->srcu); ++ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) + blk_mq_freeze_queue_wait(ns->queue); +- up_read(&ctrl->namespaces_rwsem); ++ srcu_read_unlock(&ctrl->srcu, srcu_idx); + } + EXPORT_SYMBOL_GPL(nvme_wait_freeze); + + void nvme_start_freeze(struct nvme_ctrl *ctrl) + { + struct nvme_ns *ns; ++ int srcu_idx; + + set_bit(NVME_CTRL_FROZEN, &ctrl->flags); +- down_read(&ctrl->namespaces_rwsem); +- list_for_each_entry(ns, &ctrl->namespaces, list) ++ srcu_idx = srcu_read_lock(&ctrl->srcu); ++ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) + blk_freeze_queue_start(ns->queue); +- up_read(&ctrl->namespaces_rwsem); ++ srcu_read_unlock(&ctrl->srcu, srcu_idx); + } + EXPORT_SYMBOL_GPL(nvme_start_freeze); + +@@ -4633,11 +4655,12 @@ EXPORT_SYMBOL_GPL(nvme_unquiesce_admin_queue); + void nvme_sync_io_queues(struct nvme_ctrl *ctrl) + { + struct nvme_ns *ns; ++ int srcu_idx; + +- down_read(&ctrl->namespaces_rwsem); +- list_for_each_entry(ns, &ctrl->namespaces, list) ++ srcu_idx = 
srcu_read_lock(&ctrl->srcu); ++ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) + blk_sync_queue(ns->queue); +- up_read(&ctrl->namespaces_rwsem); ++ srcu_read_unlock(&ctrl->srcu, srcu_idx); + } + EXPORT_SYMBOL_GPL(nvme_sync_io_queues); + +diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c +index 4939ed35638f1..875dee6ecd408 100644 +--- a/drivers/nvme/host/ioctl.c ++++ b/drivers/nvme/host/ioctl.c +@@ -921,15 +921,15 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp, + bool open_for_write) + { + struct nvme_ns *ns; +- int ret; ++ int ret, srcu_idx; + +- down_read(&ctrl->namespaces_rwsem); ++ srcu_idx = srcu_read_lock(&ctrl->srcu); + if (list_empty(&ctrl->namespaces)) { + ret = -ENOTTY; + goto out_unlock; + } + +- ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list); ++ ns = list_first_or_null_rcu(&ctrl->namespaces, struct nvme_ns, list); + if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) { + dev_warn(ctrl->device, + "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n"); +@@ -939,15 +939,18 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp, + + dev_warn(ctrl->device, + "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n"); +- kref_get(&ns->kref); +- up_read(&ctrl->namespaces_rwsem); ++ if (!nvme_get_ns(ns)) { ++ ret = -ENXIO; ++ goto out_unlock; ++ } ++ srcu_read_unlock(&ctrl->srcu, srcu_idx); + + ret = nvme_user_cmd(ctrl, ns, argp, 0, open_for_write); + nvme_put_ns(ns); + return ret; + + out_unlock: +- up_read(&ctrl->namespaces_rwsem); ++ srcu_read_unlock(&ctrl->srcu, srcu_idx); + return ret; + } + +diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c +index 6515fa537ee53..645a6b1322205 100644 +--- a/drivers/nvme/host/multipath.c ++++ b/drivers/nvme/host/multipath.c +@@ -151,16 +151,17 @@ void nvme_mpath_end_request(struct request *rq) + void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) + { + struct nvme_ns *ns; ++ int srcu_idx; + +- down_read(&ctrl->namespaces_rwsem); +- list_for_each_entry(ns, &ctrl->namespaces, list) { ++ srcu_idx = srcu_read_lock(&ctrl->srcu); ++ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) { + if (!ns->head->disk) + continue; + kblockd_schedule_work(&ns->head->requeue_work); + if (ctrl->state == NVME_CTRL_LIVE) + disk_uevent(ns->head->disk, KOBJ_CHANGE); + } +- up_read(&ctrl->namespaces_rwsem); ++ srcu_read_unlock(&ctrl->srcu, srcu_idx); + } + + static const char *nvme_ana_state_names[] = { +@@ -194,13 +195,14 @@ bool nvme_mpath_clear_current_path(struct nvme_ns *ns) + void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl) + { + struct nvme_ns *ns; ++ int srcu_idx; + +- down_read(&ctrl->namespaces_rwsem); +- list_for_each_entry(ns, &ctrl->namespaces, list) { ++ srcu_idx = srcu_read_lock(&ctrl->srcu); ++ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) { + nvme_mpath_clear_current_path(ns); + kblockd_schedule_work(&ns->head->requeue_work); + } +- up_read(&ctrl->namespaces_rwsem); ++ srcu_read_unlock(&ctrl->srcu, srcu_idx); + } + + void nvme_mpath_revalidate_paths(struct nvme_ns *ns) +@@ -679,6 +681,7 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl, + u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0; + unsigned *nr_change_groups = data; + struct nvme_ns *ns; ++ int srcu_idx; + + dev_dbg(ctrl->device, "ANA group %d: %s.\n", + le32_to_cpu(desc->grpid), +@@ -690,8 +693,8 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl, + if (!nr_nsids) + return 0; + +- down_read(&ctrl->namespaces_rwsem); +- 
list_for_each_entry(ns, &ctrl->namespaces, list) { ++ srcu_idx = srcu_read_lock(&ctrl->srcu); ++ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) { + unsigned nsid; + again: + nsid = le32_to_cpu(desc->nsids[n]); +@@ -704,7 +707,7 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl, + if (ns->head->ns_id > nsid) + goto again; + } +- up_read(&ctrl->namespaces_rwsem); ++ srcu_read_unlock(&ctrl->srcu, srcu_idx); + return 0; + } + +diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h +index 21c24cd8b1e8a..d2b6975e71fbc 100644 +--- a/drivers/nvme/host/nvme.h ++++ b/drivers/nvme/host/nvme.h +@@ -280,7 +280,8 @@ struct nvme_ctrl { + struct blk_mq_tag_set *tagset; + struct blk_mq_tag_set *admin_tagset; + struct list_head namespaces; +- struct rw_semaphore namespaces_rwsem; ++ struct mutex namespaces_lock; ++ struct srcu_struct srcu; + struct device ctrl_device; + struct device *device; /* char device */ + #ifdef CONFIG_NVME_HWMON +@@ -1126,6 +1127,7 @@ void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects, + struct nvme_command *cmd, int status); + struct nvme_ctrl *nvme_ctrl_from_file(struct file *file); + struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid); ++bool nvme_get_ns(struct nvme_ns *ns); + void nvme_put_ns(struct nvme_ns *ns); + + static inline bool nvme_multi_css(struct nvme_ctrl *ctrl) +diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c +index 4597bca43a6d8..a6d55ebb82382 100644 +--- a/drivers/nvme/target/rdma.c ++++ b/drivers/nvme/target/rdma.c +@@ -473,12 +473,8 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue) + return 0; + + out_free: +- while (--i >= 0) { +- struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; +- +- list_del(&rsp->free_list); +- nvmet_rdma_free_rsp(ndev, rsp); +- } ++ while (--i >= 0) ++ nvmet_rdma_free_rsp(ndev, &queue->rsps[i]); + kfree(queue->rsps); + out: + return ret; +@@ -489,12 +485,8 @@ static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue) + struct nvmet_rdma_device *ndev = queue->dev; + int i, nr_rsps = queue->recv_queue_size * 2; + +- for (i = 0; i < nr_rsps; i++) { +- struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; +- +- list_del(&rsp->free_list); +- nvmet_rdma_free_rsp(ndev, rsp); +- } ++ for (i = 0; i < nr_rsps; i++) ++ nvmet_rdma_free_rsp(ndev, &queue->rsps[i]); + kfree(queue->rsps); + } + +diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c +index 3d302815c6f36..c65a1f4421f60 100644 +--- a/drivers/nvme/target/tcp.c ++++ b/drivers/nvme/target/tcp.c +@@ -875,6 +875,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue) + pr_err("bad nvme-tcp pdu length (%d)\n", + le32_to_cpu(icreq->hdr.plen)); + nvmet_tcp_fatal_error(queue); ++ return -EPROTO; + } + + if (icreq->pfv != NVME_TCP_PFV_1_0) { +diff --git a/drivers/nvme/target/trace.c b/drivers/nvme/target/trace.c +index bff454d46255b..6ee1f3db81d04 100644 +--- a/drivers/nvme/target/trace.c ++++ b/drivers/nvme/target/trace.c +@@ -211,7 +211,7 @@ const char *nvmet_trace_disk_name(struct trace_seq *p, char *name) + return ret; + } + +-const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl) ++const char *nvmet_trace_ctrl_id(struct trace_seq *p, u16 ctrl_id) + { + const char *ret = trace_seq_buffer_ptr(p); + +@@ -224,8 +224,8 @@ const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl) + * If we can know the extra data of the connect command in this stage, + * we can update this print statement later. 
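+	 * For example, with cntlid 1 the trace prefix renders as
+	 * "nvmet1: ...", and as "nvmet_: ..." before an association exists.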
+ */ +- if (ctrl) +- trace_seq_printf(p, "%d", ctrl->cntlid); ++ if (ctrl_id) ++ trace_seq_printf(p, "%d", ctrl_id); + else + trace_seq_printf(p, "_"); + trace_seq_putc(p, 0); +diff --git a/drivers/nvme/target/trace.h b/drivers/nvme/target/trace.h +index 974d99d47f514..7f7ebf9558e50 100644 +--- a/drivers/nvme/target/trace.h ++++ b/drivers/nvme/target/trace.h +@@ -32,18 +32,24 @@ const char *nvmet_trace_parse_fabrics_cmd(struct trace_seq *p, u8 fctype, + nvmet_trace_parse_nvm_cmd(p, opcode, cdw10) : \ + nvmet_trace_parse_admin_cmd(p, opcode, cdw10))) + +-const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl); +-#define __print_ctrl_name(ctrl) \ +- nvmet_trace_ctrl_name(p, ctrl) ++const char *nvmet_trace_ctrl_id(struct trace_seq *p, u16 ctrl_id); ++#define __print_ctrl_id(ctrl_id) \ ++ nvmet_trace_ctrl_id(p, ctrl_id) + + const char *nvmet_trace_disk_name(struct trace_seq *p, char *name); + #define __print_disk_name(name) \ + nvmet_trace_disk_name(p, name) + + #ifndef TRACE_HEADER_MULTI_READ +-static inline struct nvmet_ctrl *nvmet_req_to_ctrl(struct nvmet_req *req) ++static inline u16 nvmet_req_to_ctrl_id(struct nvmet_req *req) + { +- return req->sq->ctrl; ++ /* ++ * The queue and controller pointers are not valid until an association ++ * has been established. ++ */ ++ if (!req->sq || !req->sq->ctrl) ++ return 0; ++ return req->sq->ctrl->cntlid; + } + + static inline void __assign_req_name(char *name, struct nvmet_req *req) +@@ -62,7 +68,7 @@ TRACE_EVENT(nvmet_req_init, + TP_ARGS(req, cmd), + TP_STRUCT__entry( + __field(struct nvme_command *, cmd) +- __field(struct nvmet_ctrl *, ctrl) ++ __field(u16, ctrl_id) + __array(char, disk, DISK_NAME_LEN) + __field(int, qid) + __field(u16, cid) +@@ -75,7 +81,7 @@ TRACE_EVENT(nvmet_req_init, + ), + TP_fast_assign( + __entry->cmd = cmd; +- __entry->ctrl = nvmet_req_to_ctrl(req); ++ __entry->ctrl_id = nvmet_req_to_ctrl_id(req); + __assign_req_name(__entry->disk, req); + __entry->qid = req->sq->qid; + __entry->cid = cmd->common.command_id; +@@ -89,7 +95,7 @@ TRACE_EVENT(nvmet_req_init, + ), + TP_printk("nvmet%s: %sqid=%d, cmdid=%u, nsid=%u, flags=%#x, " + "meta=%#llx, cmd=(%s, %s)", +- __print_ctrl_name(__entry->ctrl), ++ __print_ctrl_id(__entry->ctrl_id), + __print_disk_name(__entry->disk), + __entry->qid, __entry->cid, __entry->nsid, + __entry->flags, __entry->metadata, +@@ -103,7 +109,7 @@ TRACE_EVENT(nvmet_req_complete, + TP_PROTO(struct nvmet_req *req), + TP_ARGS(req), + TP_STRUCT__entry( +- __field(struct nvmet_ctrl *, ctrl) ++ __field(u16, ctrl_id) + __array(char, disk, DISK_NAME_LEN) + __field(int, qid) + __field(int, cid) +@@ -111,7 +117,7 @@ TRACE_EVENT(nvmet_req_complete, + __field(u16, status) + ), + TP_fast_assign( +- __entry->ctrl = nvmet_req_to_ctrl(req); ++ __entry->ctrl_id = nvmet_req_to_ctrl_id(req); + __entry->qid = req->cq->qid; + __entry->cid = req->cqe->command_id; + __entry->result = le64_to_cpu(req->cqe->result.u64); +@@ -119,7 +125,7 @@ TRACE_EVENT(nvmet_req_complete, + __assign_req_name(__entry->disk, req); + ), + TP_printk("nvmet%s: %sqid=%d, cmdid=%u, res=%#llx, status=%#x", +- __print_ctrl_name(__entry->ctrl), ++ __print_ctrl_id(__entry->ctrl_id), + __print_disk_name(__entry->disk), + __entry->qid, __entry->cid, __entry->result, __entry->status) + +diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c +index 9ce0d20a6c581..feef537257d05 100644 +--- a/drivers/parisc/ccio-dma.c ++++ b/drivers/parisc/ccio-dma.c +@@ -1022,7 +1022,7 @@ static const struct dma_map_ops ccio_ops = { + .map_sg = 
ccio_map_sg, + .unmap_sg = ccio_unmap_sg, + .get_sgtable = dma_common_get_sgtable, +- .alloc_pages = dma_common_alloc_pages, ++ .alloc_pages_op = dma_common_alloc_pages, + .free_pages = dma_common_free_pages, + }; + +diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c +index 05e7103d1d407..6f5b919280ff0 100644 +--- a/drivers/parisc/sba_iommu.c ++++ b/drivers/parisc/sba_iommu.c +@@ -1090,7 +1090,7 @@ static const struct dma_map_ops sba_ops = { + .map_sg = sba_map_sg, + .unmap_sg = sba_unmap_sg, + .get_sgtable = dma_common_get_sgtable, +- .alloc_pages = dma_common_alloc_pages, ++ .alloc_pages_op = dma_common_alloc_pages, + .free_pages = dma_common_free_pages, + }; + +diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c +index 7fc602e01487d..7e89f547999b2 100644 +--- a/drivers/platform/surface/aggregator/controller.c ++++ b/drivers/platform/surface/aggregator/controller.c +@@ -1354,7 +1354,8 @@ void ssam_controller_destroy(struct ssam_controller *ctrl) + if (ctrl->state == SSAM_CONTROLLER_UNINITIALIZED) + return; + +- WARN_ON(ctrl->state != SSAM_CONTROLLER_STOPPED); ++ WARN_ON(ctrl->state != SSAM_CONTROLLER_STOPPED && ++ ctrl->state != SSAM_CONTROLLER_INITIALIZED); + + /* + * Note: New events could still have been received after the previous +diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c +index cefd0d886cfd4..53d957d4eea4d 100644 +--- a/drivers/platform/x86/intel/ifs/load.c ++++ b/drivers/platform/x86/intel/ifs/load.c +@@ -260,6 +260,7 @@ int ifs_load_firmware(struct device *dev) + { + const struct ifs_test_caps *test = ifs_get_test_caps(dev); + struct ifs_data *ifsd = ifs_get_data(dev); ++ unsigned int expected_size; + const struct firmware *fw; + char scan_path[64]; + int ret = -EINVAL; +@@ -274,6 +275,14 @@ int ifs_load_firmware(struct device *dev) + goto done; + } + ++ expected_size = ((struct microcode_header_intel *)fw->data)->totalsize; ++ if (fw->size != expected_size) { ++ dev_err(dev, "File size mismatch (expected %u, actual %zu). 
Corrupted IFS image.\n", ++ expected_size, fw->size); ++ ret = -EINVAL; ++ goto release; ++ } ++ + ret = image_sanity_check(dev, (struct microcode_header_intel *)fw->data); + if (ret) + goto release; +diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c +index a1e27334cdf54..78c48a1f9c68a 100644 +--- a/drivers/platform/x86/lg-laptop.c ++++ b/drivers/platform/x86/lg-laptop.c +@@ -715,7 +715,7 @@ static int acpi_add(struct acpi_device *device) + default: + year = 2019; + } +- pr_info("product: %s year: %d\n", product, year); ++ pr_info("product: %s year: %d\n", product ?: "unknown", year); + + if (year >= 2019) + battery_limit_use_wmbb = 1; +diff --git a/drivers/pmdomain/imx/imx93-pd.c b/drivers/pmdomain/imx/imx93-pd.c +index b9e60d136875a..660d00d98ecc1 100644 +--- a/drivers/pmdomain/imx/imx93-pd.c ++++ b/drivers/pmdomain/imx/imx93-pd.c +@@ -20,6 +20,7 @@ + #define FUNC_STAT_PSW_STAT_MASK BIT(0) + #define FUNC_STAT_RST_STAT_MASK BIT(2) + #define FUNC_STAT_ISO_STAT_MASK BIT(4) ++#define FUNC_STAT_SSAR_STAT_MASK BIT(8) + + struct imx93_power_domain { + struct generic_pm_domain genpd; +@@ -50,7 +51,7 @@ static int imx93_pd_on(struct generic_pm_domain *genpd) + writel(val, addr + MIX_SLICE_SW_CTRL_OFF); + + ret = readl_poll_timeout(addr + MIX_FUNC_STAT_OFF, val, +- !(val & FUNC_STAT_ISO_STAT_MASK), 1, 10000); ++ !(val & FUNC_STAT_SSAR_STAT_MASK), 1, 10000); + if (ret) { + dev_err(domain->dev, "pd_on timeout: name: %s, stat: %x\n", genpd->name, val); + return ret; +@@ -72,7 +73,7 @@ static int imx93_pd_off(struct generic_pm_domain *genpd) + writel(val, addr + MIX_SLICE_SW_CTRL_OFF); + + ret = readl_poll_timeout(addr + MIX_FUNC_STAT_OFF, val, +- val & FUNC_STAT_PSW_STAT_MASK, 1, 1000); ++ val & FUNC_STAT_PSW_STAT_MASK, 1, 10000); + if (ret) { + dev_err(domain->dev, "pd_off timeout: name: %s, stat: %x\n", genpd->name, val); + return ret; +diff --git a/drivers/pmdomain/imx/scu-pd.c b/drivers/pmdomain/imx/scu-pd.c +index 891c1d925a9de..368918e562f55 100644 +--- a/drivers/pmdomain/imx/scu-pd.c ++++ b/drivers/pmdomain/imx/scu-pd.c +@@ -223,11 +223,6 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = { + { "lvds1-pwm", IMX_SC_R_LVDS_1_PWM_0, 1, false, 0 }, + { "lvds1-lpi2c", IMX_SC_R_LVDS_1_I2C_0, 2, true, 0 }, + +- { "mipi1", IMX_SC_R_MIPI_1, 1, 0 }, +- { "mipi1-pwm0", IMX_SC_R_MIPI_1_PWM_0, 1, 0 }, +- { "mipi1-i2c", IMX_SC_R_MIPI_1_I2C_0, 2, 1 }, +- { "lvds1", IMX_SC_R_LVDS_1, 1, 0 }, +- + /* DC SS */ + { "dc0", IMX_SC_R_DC_0, 1, false, 0 }, + { "dc0-pll", IMX_SC_R_DC_0_PLL_0, 2, true, 0 }, +diff --git a/drivers/rtc/rtc-nct3018y.c b/drivers/rtc/rtc-nct3018y.c +index ed4e606be8e58..c4533c0f53896 100644 +--- a/drivers/rtc/rtc-nct3018y.c ++++ b/drivers/rtc/rtc-nct3018y.c +@@ -99,6 +99,8 @@ static int nct3018y_get_alarm_mode(struct i2c_client *client, unsigned char *ala + if (flags < 0) + return flags; + *alarm_enable = flags & NCT3018Y_BIT_AIE; ++ dev_dbg(&client->dev, "%s:alarm_enable:%x\n", __func__, *alarm_enable); ++ + } + + if (alarm_flag) { +@@ -107,11 +109,9 @@ static int nct3018y_get_alarm_mode(struct i2c_client *client, unsigned char *ala + if (flags < 0) + return flags; + *alarm_flag = flags & NCT3018Y_BIT_AF; ++ dev_dbg(&client->dev, "%s:alarm_flag:%x\n", __func__, *alarm_flag); + } + +- dev_dbg(&client->dev, "%s:alarm_enable:%x alarm_flag:%x\n", +- __func__, *alarm_enable, *alarm_flag); +- + return 0; + } + +diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c +index 6f97456798917..0bd880f5475b1 100644 +--- a/drivers/s390/block/dasd.c 
++++ b/drivers/s390/block/dasd.c +@@ -1599,9 +1599,15 @@ static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb) + if (!sense) + return 0; + +- return !!(sense[1] & SNS1_NO_REC_FOUND) || +- !!(sense[1] & SNS1_FILE_PROTECTED) || +- scsw_cstat(&irb->scsw) == SCHN_STAT_INCORR_LEN; ++ if (sense[1] & SNS1_NO_REC_FOUND) ++ return 1; ++ ++ if ((sense[1] & SNS1_INV_TRACK_FORMAT) && ++ scsw_is_tm(&irb->scsw) && ++ !(sense[2] & SNS2_ENV_DATA_PRESENT)) ++ return 1; ++ ++ return 0; + } + + static int dasd_ese_oos_cond(u8 *sense) +@@ -1622,7 +1628,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, + struct dasd_device *device; + unsigned long now; + int nrf_suppressed = 0; +- int fp_suppressed = 0; ++ int it_suppressed = 0; + struct request *req; + u8 *sense = NULL; + int expires; +@@ -1677,8 +1683,9 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, + */ + sense = dasd_get_sense(irb); + if (sense) { +- fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) && +- test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); ++ it_suppressed = (sense[1] & SNS1_INV_TRACK_FORMAT) && ++ !(sense[2] & SNS2_ENV_DATA_PRESENT) && ++ test_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags); + nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) && + test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); + +@@ -1693,7 +1700,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, + return; + } + } +- if (!(fp_suppressed || nrf_suppressed)) ++ if (!(it_suppressed || nrf_suppressed)) + device->discipline->dump_sense_dbf(device, irb, "int"); + + if (device->features & DASD_FEATURE_ERPLOG) +@@ -2465,14 +2472,17 @@ static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible) + rc = 0; + list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { + /* +- * In some cases the 'File Protected' or 'Incorrect Length' +- * error might be expected and error recovery would be +- * unnecessary in these cases. Check if the according suppress +- * bit is set. ++ * In some cases certain errors might be expected and ++ * error recovery would be unnecessary in these cases. ++ * Check if the according suppress bit is set. + */ + sense = dasd_get_sense(&cqr->irb); +- if (sense && sense[1] & SNS1_FILE_PROTECTED && +- test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags)) ++ if (sense && (sense[1] & SNS1_INV_TRACK_FORMAT) && ++ !(sense[2] & SNS2_ENV_DATA_PRESENT) && ++ test_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags)) ++ continue; ++ if (sense && (sense[1] & SNS1_NO_REC_FOUND) && ++ test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags)) + continue; + if (scsw_cstat(&cqr->irb.scsw) == 0x40 && + test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags)) +diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c +index 89957bb7244d2..07f886029a358 100644 +--- a/drivers/s390/block/dasd_3990_erp.c ++++ b/drivers/s390/block/dasd_3990_erp.c +@@ -1406,14 +1406,8 @@ dasd_3990_erp_file_prot(struct dasd_ccw_req * erp) + + struct dasd_device *device = erp->startdev; + +- /* +- * In some cases the 'File Protected' error might be expected and +- * log messages shouldn't be written then. +- * Check if the according suppress bit is set. 
+- */ +- if (!test_bit(DASD_CQR_SUPPRESS_FP, &erp->flags)) +- dev_err(&device->cdev->dev, +- "Accessing the DASD failed because of a hardware error\n"); ++ dev_err(&device->cdev->dev, ++ "Accessing the DASD failed because of a hardware error\n"); + + return dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED); + +diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c +index 2e4e555b37c33..12db1046aad0f 100644 +--- a/drivers/s390/block/dasd_diag.c ++++ b/drivers/s390/block/dasd_diag.c +@@ -639,7 +639,6 @@ static void dasd_diag_setup_blk_queue(struct dasd_block *block) + /* With page sized segments each segment can be translated into one idaw/tidaw */ + blk_queue_max_segment_size(q, PAGE_SIZE); + blk_queue_segment_boundary(q, PAGE_SIZE - 1); +- blk_queue_dma_alignment(q, PAGE_SIZE - 1); + } + + static int dasd_diag_pe_handler(struct dasd_device *device, +diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c +index bd89b032968a4..d9fb7f097b7e5 100644 +--- a/drivers/s390/block/dasd_eckd.c ++++ b/drivers/s390/block/dasd_eckd.c +@@ -2289,6 +2289,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device) + cqr->status = DASD_CQR_FILLED; + /* Set flags to suppress output for expected errors */ + set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); ++ set_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags); + + return cqr; + } +@@ -2570,7 +2571,6 @@ dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata, + cqr->buildclk = get_tod_clock(); + cqr->status = DASD_CQR_FILLED; + /* Set flags to suppress output for expected errors */ +- set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); + set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags); + + return cqr; +@@ -4146,8 +4146,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( + + /* Set flags to suppress output for expected errors */ + if (dasd_eckd_is_ese(basedev)) { +- set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); +- set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags); + set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); + } + +@@ -4649,9 +4647,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( + + /* Set flags to suppress output for expected errors */ + if (dasd_eckd_is_ese(basedev)) { +- set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); +- set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags); + set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); ++ set_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags); + } + + return cqr; +@@ -5820,36 +5817,32 @@ static void dasd_eckd_dump_sense(struct dasd_device *device, + { + u8 *sense = dasd_get_sense(irb); + +- if (scsw_is_tm(&irb->scsw)) { +- /* +- * In some cases the 'File Protected' or 'Incorrect Length' +- * error might be expected and log messages shouldn't be written +- * then. Check if the according suppress bit is set. +- */ +- if (sense && (sense[1] & SNS1_FILE_PROTECTED) && +- test_bit(DASD_CQR_SUPPRESS_FP, &req->flags)) +- return; +- if (scsw_cstat(&irb->scsw) == 0x40 && +- test_bit(DASD_CQR_SUPPRESS_IL, &req->flags)) +- return; ++ /* ++ * In some cases certain errors might be expected and ++ * log messages shouldn't be written then. ++ * Check if the according suppress bit is set. ++ */ ++ if (sense && (sense[1] & SNS1_INV_TRACK_FORMAT) && ++ !(sense[2] & SNS2_ENV_DATA_PRESENT) && ++ test_bit(DASD_CQR_SUPPRESS_IT, &req->flags)) ++ return; + +- dasd_eckd_dump_sense_tcw(device, req, irb); +- } else { +- /* +- * In some cases the 'Command Reject' or 'No Record Found' +- * error might be expected and log messages shouldn't be +- * written then. Check if the according suppress bit is set. 
+- */ +- if (sense && sense[0] & SNS0_CMD_REJECT && +- test_bit(DASD_CQR_SUPPRESS_CR, &req->flags)) +- return; ++ if (sense && sense[0] & SNS0_CMD_REJECT && ++ test_bit(DASD_CQR_SUPPRESS_CR, &req->flags)) ++ return; + +- if (sense && sense[1] & SNS1_NO_REC_FOUND && +- test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags)) +- return; ++ if (sense && sense[1] & SNS1_NO_REC_FOUND && ++ test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags)) ++ return; + ++ if (scsw_cstat(&irb->scsw) == 0x40 && ++ test_bit(DASD_CQR_SUPPRESS_IL, &req->flags)) ++ return; ++ ++ if (scsw_is_tm(&irb->scsw)) ++ dasd_eckd_dump_sense_tcw(device, req, irb); ++ else + dasd_eckd_dump_sense_ccw(device, req, irb); +- } + } + + static int dasd_eckd_reload_device(struct dasd_device *device) +@@ -6895,7 +6888,6 @@ static void dasd_eckd_setup_blk_queue(struct dasd_block *block) + /* With page sized segments each segment can be translated into one idaw/tidaw */ + blk_queue_max_segment_size(q, PAGE_SIZE); + blk_queue_segment_boundary(q, PAGE_SIZE - 1); +- blk_queue_dma_alignment(q, PAGE_SIZE - 1); + } + + static struct ccw_driver dasd_eckd_driver = { +diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h +index 8a4dbe9d77411..fa5e070fd0c1c 100644 +--- a/drivers/s390/block/dasd_int.h ++++ b/drivers/s390/block/dasd_int.h +@@ -225,7 +225,7 @@ struct dasd_ccw_req { + * The following flags are used to suppress output of certain errors. + */ + #define DASD_CQR_SUPPRESS_NRF 4 /* Suppress 'No Record Found' error */ +-#define DASD_CQR_SUPPRESS_FP 5 /* Suppress 'File Protected' error*/ ++#define DASD_CQR_SUPPRESS_IT 5 /* Suppress 'Invalid Track' error*/ + #define DASD_CQR_SUPPRESS_IL 6 /* Suppress 'Incorrect Length' error */ + #define DASD_CQR_SUPPRESS_CR 7 /* Suppress 'Command Reject' error */ + +diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c +index 45f9c0736be4f..e5f28370a9039 100644 +--- a/drivers/s390/cio/idset.c ++++ b/drivers/s390/cio/idset.c +@@ -16,20 +16,21 @@ struct idset { + unsigned long bitmap[]; + }; + +-static inline unsigned long bitmap_size(int num_ssid, int num_id) ++static inline unsigned long idset_bitmap_size(int num_ssid, int num_id) + { +- return BITS_TO_LONGS(num_ssid * num_id) * sizeof(unsigned long); ++ return bitmap_size(size_mul(num_ssid, num_id)); + } + + static struct idset *idset_new(int num_ssid, int num_id) + { + struct idset *set; + +- set = vmalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id)); ++ set = vmalloc(sizeof(struct idset) + ++ idset_bitmap_size(num_ssid, num_id)); + if (set) { + set->num_ssid = num_ssid; + set->num_id = num_id; +- memset(set->bitmap, 0, bitmap_size(num_ssid, num_id)); ++ memset(set->bitmap, 0, idset_bitmap_size(num_ssid, num_id)); + } + return set; + } +@@ -41,7 +42,8 @@ void idset_free(struct idset *set) + + void idset_fill(struct idset *set) + { +- memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id)); ++ memset(set->bitmap, 0xff, ++ idset_bitmap_size(set->num_ssid, set->num_id)); + } + + static inline void idset_add(struct idset *set, int ssid, int id) +diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c +index 5af669b930193..9cd22588c8eb3 100644 +--- a/drivers/scsi/lpfc/lpfc_sli.c ++++ b/drivers/scsi/lpfc/lpfc_sli.c +@@ -7577,7 +7577,7 @@ lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba, + struct lpfc_sglq *sglq_entry = NULL; + struct lpfc_sglq *sglq_entry_next = NULL; + struct lpfc_sglq *sglq_entry_first = NULL; +- int status, total_cnt; ++ int status = 0, total_cnt; + int post_cnt = 0, num_posted = 0, block_cnt 
= 0; + int last_xritag = NO_XRI; + LIST_HEAD(prep_sgl_list); +diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c +index 2442d4d2e3f38..f668c1c0a98f2 100644 +--- a/drivers/scsi/scsi_transport_spi.c ++++ b/drivers/scsi/scsi_transport_spi.c +@@ -676,10 +676,10 @@ spi_dv_device_echo_buffer(struct scsi_device *sdev, u8 *buffer, + for (r = 0; r < retries; r++) { + result = spi_execute(sdev, spi_write_buffer, REQ_OP_DRV_OUT, + buffer, len, &sshdr); +- if(result || !scsi_device_online(sdev)) { ++ if (result || !scsi_device_online(sdev)) { + + scsi_device_set_state(sdev, SDEV_QUIESCE); +- if (scsi_sense_valid(&sshdr) ++ if (result > 0 && scsi_sense_valid(&sshdr) + && sshdr.sense_key == ILLEGAL_REQUEST + /* INVALID FIELD IN CDB */ + && sshdr.asc == 0x24 && sshdr.ascq == 0x00) +diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c +index 0c736d51566dc..070a99a4180cc 100644 +--- a/drivers/ssb/main.c ++++ b/drivers/ssb/main.c +@@ -839,7 +839,7 @@ static u32 clkfactor_f6_resolve(u32 v) + case SSB_CHIPCO_CLK_F6_7: + return 7; + } +- return 0; ++ return 1; + } + + /* Calculate the speed the backplane would run at a given set of clockcontrol values */ +diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c +index 06de5823eb8e3..d38c4674cd6d7 100644 +--- a/drivers/staging/iio/resolver/ad2s1210.c ++++ b/drivers/staging/iio/resolver/ad2s1210.c +@@ -658,9 +658,6 @@ static int ad2s1210_probe(struct spi_device *spi) + if (!indio_dev) + return -ENOMEM; + st = iio_priv(indio_dev); +- ret = ad2s1210_setup_gpios(st); +- if (ret < 0) +- return ret; + + spi_set_drvdata(spi, indio_dev); + +@@ -671,6 +668,10 @@ static int ad2s1210_probe(struct spi_device *spi) + st->resolution = 12; + st->fexcit = AD2S1210_DEF_EXCIT; + ++ ret = ad2s1210_setup_gpios(st); ++ if (ret < 0) ++ return ret; ++ + indio_dev->info = &ad2s1210_info; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->channels = ad2s1210_channels; +diff --git a/drivers/staging/ks7010/ks7010_sdio.c b/drivers/staging/ks7010/ks7010_sdio.c +index 9fb118e77a1f0..f1d44e4955fc6 100644 +--- a/drivers/staging/ks7010/ks7010_sdio.c ++++ b/drivers/staging/ks7010/ks7010_sdio.c +@@ -395,9 +395,9 @@ int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size, + priv->hostt.buff[priv->hostt.qtail] = le16_to_cpu(hdr->event); + priv->hostt.qtail = (priv->hostt.qtail + 1) % SME_EVENT_BUFF_SIZE; + +- spin_lock(&priv->tx_dev.tx_dev_lock); ++ spin_lock_bh(&priv->tx_dev.tx_dev_lock); + result = enqueue_txdev(priv, p, size, complete_handler, skb); +- spin_unlock(&priv->tx_dev.tx_dev_lock); ++ spin_unlock_bh(&priv->tx_dev.tx_dev_lock); + + if (txq_has_space(priv)) + queue_delayed_work(priv->wq, &priv->rw_dwork, 0); +diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c +index de0f9cd44c231..f9f40c0e9addc 100644 +--- a/drivers/thunderbolt/switch.c ++++ b/drivers/thunderbolt/switch.c +@@ -3159,6 +3159,7 @@ void tb_switch_remove(struct tb_switch *sw) + tb_switch_remove(port->remote->sw); + port->remote = NULL; + } else if (port->xdomain) { ++ port->xdomain->is_unplugged = true; + tb_xdomain_remove(port->xdomain); + port->xdomain = NULL; + } +diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c +index 88cdafa5ac541..bcca5627afaca 100644 +--- a/drivers/tty/serial/atmel_serial.c ++++ b/drivers/tty/serial/atmel_serial.c +@@ -2522,7 +2522,7 @@ static const struct uart_ops atmel_pops = { + }; + + static const struct serial_rs485 atmel_rs485_supported = { +- .flags 
= SER_RS485_ENABLED | SER_RS485_RTS_AFTER_SEND | SER_RS485_RX_DURING_TX, ++ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RX_DURING_TX, + .delay_rts_before_send = 1, + .delay_rts_after_send = 1, + }; +diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c +index 385b41275e8b7..8bd0f8e45b146 100644 +--- a/drivers/tty/serial/fsl_lpuart.c ++++ b/drivers/tty/serial/fsl_lpuart.c +@@ -2930,6 +2930,7 @@ static int lpuart_probe(struct platform_device *pdev) + pm_runtime_set_autosuspend_delay(&pdev->dev, UART_AUTOSUSPEND_TIMEOUT); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); ++ pm_runtime_mark_last_busy(&pdev->dev); + + ret = lpuart_global_reset(sport); + if (ret) +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c +index 674467b7638ea..5ce276cb39d0d 100644 +--- a/drivers/usb/dwc3/core.c ++++ b/drivers/usb/dwc3/core.c +@@ -506,6 +506,13 @@ static void dwc3_free_event_buffers(struct dwc3 *dwc) + static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned int length) + { + struct dwc3_event_buffer *evt; ++ unsigned int hw_mode; ++ ++ hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0); ++ if (hw_mode == DWC3_GHWPARAMS0_MODE_HOST) { ++ dwc->ev_buf = NULL; ++ return 0; ++ } + + evt = dwc3_alloc_one_event_buffer(dwc, length); + if (IS_ERR(evt)) { +@@ -527,6 +534,9 @@ int dwc3_event_buffers_setup(struct dwc3 *dwc) + { + struct dwc3_event_buffer *evt; + ++ if (!dwc->ev_buf) ++ return 0; ++ + evt = dwc->ev_buf; + evt->lpos = 0; + dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0), +@@ -544,6 +554,9 @@ void dwc3_event_buffers_cleanup(struct dwc3 *dwc) + { + struct dwc3_event_buffer *evt; + ++ if (!dwc->ev_buf) ++ return; ++ + evt = dwc->ev_buf; + + evt->lpos = 0; +diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c +index ee5705d336e3d..10a82527626eb 100644 +--- a/drivers/usb/gadget/udc/fsl_udc_core.c ++++ b/drivers/usb/gadget/udc/fsl_udc_core.c +@@ -2486,7 +2486,7 @@ static int fsl_udc_probe(struct platform_device *pdev) + /* setup the udc->eps[] for non-control endpoints and link + * to gadget.ep_list */ + for (i = 1; i < (int)(udc_controller->max_ep / 2); i++) { +- char name[14]; ++ char name[16]; + + sprintf(name, "ep%dout", i); + struct_ep_setup(udc_controller, i * 2, name, 1); +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index 3c9b4ae3d33b3..f005ce1f91ca2 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -2808,7 +2808,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, + xhci->num_active_eps); + return -ENOMEM; + } +- if ((xhci->quirks & XHCI_SW_BW_CHECKING) && ++ if ((xhci->quirks & XHCI_SW_BW_CHECKING) && !ctx_change && + xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) { + if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) + xhci_free_host_resources(xhci, ctrl_ctx); +@@ -4150,8 +4150,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, + mutex_unlock(&xhci->mutex); + ret = xhci_disable_slot(xhci, udev->slot_id); + xhci_free_virt_device(xhci, udev->slot_id); +- if (!ret) +- xhci_alloc_dev(hcd, udev); ++ if (!ret) { ++ if (xhci_alloc_dev(hcd, udev) == 1) ++ xhci_setup_addressable_virt_dev(xhci, udev); ++ } + kfree(command->completion); + kfree(command); + return -EPROTO; +diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c +index f14505c690f96..7db9c382c354d 100644 +--- a/drivers/usb/typec/tcpm/tcpm.c ++++ b/drivers/usb/typec/tcpm/tcpm.c +@@ -4880,7 +4880,6 @@ static void 
run_state_machine(struct tcpm_port *port) + break; + case PORT_RESET: + tcpm_reset_port(port); +- port->pd_events = 0; + if (port->self_powered) + tcpm_set_cc(port, TYPEC_CC_OPEN); + else +diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c +index 76f6f26265a3b..29257d2639dbf 100644 +--- a/drivers/xen/grant-dma-ops.c ++++ b/drivers/xen/grant-dma-ops.c +@@ -282,7 +282,7 @@ static int xen_grant_dma_supported(struct device *dev, u64 mask) + static const struct dma_map_ops xen_grant_dma_ops = { + .alloc = xen_grant_dma_alloc, + .free = xen_grant_dma_free, +- .alloc_pages = xen_grant_dma_alloc_pages, ++ .alloc_pages_op = xen_grant_dma_alloc_pages, + .free_pages = xen_grant_dma_free_pages, + .mmap = dma_common_mmap, + .get_sgtable = dma_common_get_sgtable, +diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c +index 0e6c6c25d154f..1c4ef5111651d 100644 +--- a/drivers/xen/swiotlb-xen.c ++++ b/drivers/xen/swiotlb-xen.c +@@ -403,7 +403,7 @@ const struct dma_map_ops xen_swiotlb_dma_ops = { + .dma_supported = xen_swiotlb_dma_supported, + .mmap = dma_common_mmap, + .get_sgtable = dma_common_get_sgtable, +- .alloc_pages = dma_common_alloc_pages, ++ .alloc_pages_op = dma_common_alloc_pages, + .free_pages = dma_common_free_pages, + .max_mapping_size = swiotlb_max_mapping_size, + }; +diff --git a/fs/afs/file.c b/fs/afs/file.c +index d37dd201752ba..0012ea300eb53 100644 +--- a/fs/afs/file.c ++++ b/fs/afs/file.c +@@ -529,13 +529,17 @@ static void afs_add_open_mmap(struct afs_vnode *vnode) + + static void afs_drop_open_mmap(struct afs_vnode *vnode) + { +- if (!atomic_dec_and_test(&vnode->cb_nr_mmap)) ++ if (atomic_add_unless(&vnode->cb_nr_mmap, -1, 1)) + return; + + down_write(&vnode->volume->cell->fs_open_mmaps_lock); + +- if (atomic_read(&vnode->cb_nr_mmap) == 0) ++ read_seqlock_excl(&vnode->cb_lock); ++ // the only place where ->cb_nr_mmap may hit 0 ++ // see __afs_break_callback() for the other side... ++ if (atomic_dec_and_test(&vnode->cb_nr_mmap)) + list_del_init(&vnode->cb_mmap_link); ++ read_sequnlock_excl(&vnode->cb_lock); + + up_write(&vnode->volume->cell->fs_open_mmaps_lock); + flush_work(&vnode->cb_work); +diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c +index 206812ce544ae..96a8b13b57d96 100644 +--- a/fs/binfmt_elf_fdpic.c ++++ b/fs/binfmt_elf_fdpic.c +@@ -320,7 +320,7 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm) + else + executable_stack = EXSTACK_DEFAULT; + +- if (stack_size == 0) { ++ if (stack_size == 0 && interp_params.flags & ELF_FDPIC_FLAG_PRESENT) { + stack_size = interp_params.stack_size; + if (interp_params.flags & ELF_FDPIC_FLAG_EXEC_STACK) + executable_stack = EXSTACK_ENABLE_X; +diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c +index e0108d17b085c..cf5ed5cd4102d 100644 +--- a/fs/binfmt_misc.c ++++ b/fs/binfmt_misc.c +@@ -60,12 +60,11 @@ typedef struct { + char *name; + struct dentry *dentry; + struct file *interp_file; ++ refcount_t users; /* sync removal with load_misc_binary() */ + } Node; + + static DEFINE_RWLOCK(entries_lock); + static struct file_system_type bm_fs_type; +-static struct vfsmount *bm_mnt; +-static int entry_count; + + /* + * Max length of the register string. 
Determined by: +@@ -82,19 +81,23 @@ static int entry_count; + */ + #define MAX_REGISTER_LENGTH 1920 + +-/* +- * Check if we support the binfmt +- * if we do, return the node, else NULL +- * locking is done in load_misc_binary ++/** ++ * search_binfmt_handler - search for a binary handler for @bprm ++ * @misc: handle to binfmt_misc instance ++ * @bprm: binary for which we are looking for a handler ++ * ++ * Search for a binary type handler for @bprm in the list of registered binary ++ * type handlers. ++ * ++ * Return: binary type list entry on success, NULL on failure + */ +-static Node *check_file(struct linux_binprm *bprm) ++static Node *search_binfmt_handler(struct linux_binprm *bprm) + { + char *p = strrchr(bprm->interp, '.'); +- struct list_head *l; ++ Node *e; + + /* Walk all the registered handlers. */ +- list_for_each(l, &entries) { +- Node *e = list_entry(l, Node, list); ++ list_for_each_entry(e, &entries, list) { + char *s; + int j; + +@@ -123,9 +126,49 @@ static Node *check_file(struct linux_binprm *bprm) + if (j == e->size) + return e; + } ++ + return NULL; + } + ++/** ++ * get_binfmt_handler - try to find a binary type handler ++ * @misc: handle to binfmt_misc instance ++ * @bprm: binary for which we are looking for a handler ++ * ++ * Try to find a binfmt handler for the binary type. If one is found take a ++ * reference to protect against removal via bm_{entry,status}_write(). ++ * ++ * Return: binary type list entry on success, NULL on failure ++ */ ++static Node *get_binfmt_handler(struct linux_binprm *bprm) ++{ ++ Node *e; ++ ++ read_lock(&entries_lock); ++ e = search_binfmt_handler(bprm); ++ if (e) ++ refcount_inc(&e->users); ++ read_unlock(&entries_lock); ++ return e; ++} ++ ++/** ++ * put_binfmt_handler - put binary handler node ++ * @e: node to put ++ * ++ * Free node syncing with load_misc_binary() and defer final free to ++ * load_misc_binary() in case it is using the binary type handler we were ++ * requested to remove. ++ */ ++static void put_binfmt_handler(Node *e) ++{ ++ if (refcount_dec_and_test(&e->users)) { ++ if (e->flags & MISC_FMT_OPEN_FILE) ++ filp_close(e->interp_file, NULL); ++ kfree(e); ++ } ++} ++ + /* + * the loader itself + */ +@@ -139,12 +182,7 @@ static int load_misc_binary(struct linux_binprm *bprm) + if (!enabled) + return retval; + +- /* to keep locking time low, we copy the interpreter string */ +- read_lock(&entries_lock); +- fmt = check_file(bprm); +- if (fmt) +- dget(fmt->dentry); +- read_unlock(&entries_lock); ++ fmt = get_binfmt_handler(bprm); + if (!fmt) + return retval; + +@@ -198,7 +236,16 @@ static int load_misc_binary(struct linux_binprm *bprm) + + retval = 0; + ret: +- dput(fmt->dentry); ++ ++ /* ++ * If we actually put the node here all concurrent calls to ++ * load_misc_binary() will have finished. We also know ++ * that for the refcount to be zero ->evict_inode() must have removed ++ * the node to be deleted from the list. All that is left for us is to ++ * close and free. ++ */ ++ put_binfmt_handler(fmt); ++ + return retval; + } + +@@ -552,30 +599,90 @@ static struct inode *bm_get_inode(struct super_block *sb, int mode) + return inode; + } + ++/** ++ * bm_evict_inode - cleanup data associated with @inode ++ * @inode: inode to which the data is attached ++ * ++ * Cleanup the binary type handler data associated with @inode if a binary type ++ * entry is removed or the filesystem is unmounted and the super block is ++ * shutdown. 
++ * ++ * If the ->evict call was not caused by a super block shutdown but by a write ++ * to remove the entry or all entries via bm_{entry,status}_write() the entry ++ * will have already been removed from the list. We keep the list_empty() check ++ * to make that explicit. ++*/ + static void bm_evict_inode(struct inode *inode) + { + Node *e = inode->i_private; + +- if (e && e->flags & MISC_FMT_OPEN_FILE) +- filp_close(e->interp_file, NULL); +- + clear_inode(inode); +- kfree(e); ++ ++ if (e) { ++ write_lock(&entries_lock); ++ if (!list_empty(&e->list)) ++ list_del_init(&e->list); ++ write_unlock(&entries_lock); ++ put_binfmt_handler(e); ++ } + } + +-static void kill_node(Node *e) ++/** ++ * unlink_binfmt_dentry - remove the dentry for the binary type handler ++ * @dentry: dentry associated with the binary type handler ++ * ++ * Do the actual filesystem work to remove a dentry for a registered binary ++ * type handler. Since binfmt_misc only allows simple files to be created ++ * directly under the root dentry of the filesystem we ensure that we are ++ * indeed passed a dentry directly beneath the root dentry, that the inode ++ * associated with the root dentry is locked, and that it is a regular file we ++ * are asked to remove. ++ */ ++static void unlink_binfmt_dentry(struct dentry *dentry) + { +- struct dentry *dentry; ++ struct dentry *parent = dentry->d_parent; ++ struct inode *inode, *parent_inode; ++ ++ /* All entries are immediate descendants of the root dentry. */ ++ if (WARN_ON_ONCE(dentry->d_sb->s_root != parent)) ++ return; + ++ /* We only expect to be called on regular files. */ ++ inode = d_inode(dentry); ++ if (WARN_ON_ONCE(!S_ISREG(inode->i_mode))) ++ return; ++ ++ /* The parent inode must be locked. */ ++ parent_inode = d_inode(parent); ++ if (WARN_ON_ONCE(!inode_is_locked(parent_inode))) ++ return; ++ ++ if (simple_positive(dentry)) { ++ dget(dentry); ++ simple_unlink(parent_inode, dentry); ++ d_delete(dentry); ++ dput(dentry); ++ } ++} ++ ++/** ++ * remove_binfmt_handler - remove a binary type handler ++ * @misc: handle to binfmt_misc instance ++ * @e: binary type handler to remove ++ * ++ * Remove a binary type handler from the list of binary type handlers and ++ * remove its associated dentry. This is called from ++ * binfmt_{entry,status}_write(). In the future, we might want to think about ++ * adding a proper ->unlink() method to binfmt_misc instead of forcing caller's ++ * to use writes to files in order to delete binary type handlers. But it has ++ * worked for so long that it's not a pressing issue. ++ */ ++static void remove_binfmt_handler(Node *e) ++{ + write_lock(&entries_lock); + list_del_init(&e->list); + write_unlock(&entries_lock); +- +- dentry = e->dentry; +- drop_nlink(d_inode(dentry)); +- d_drop(dentry); +- dput(dentry); +- simple_release_fs(&bm_mnt, &entry_count); ++ unlink_binfmt_dentry(e->dentry); + } + + /* / */ +@@ -602,8 +709,8 @@ bm_entry_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) + static ssize_t bm_entry_write(struct file *file, const char __user *buffer, + size_t count, loff_t *ppos) + { +- struct dentry *root; +- Node *e = file_inode(file)->i_private; ++ struct inode *inode = file_inode(file); ++ Node *e = inode->i_private; + int res = parse_command(buffer, count); + + switch (res) { +@@ -617,13 +724,22 @@ static ssize_t bm_entry_write(struct file *file, const char __user *buffer, + break; + case 3: + /* Delete this handler. 
*/ +- root = file_inode(file)->i_sb->s_root; +- inode_lock(d_inode(root)); ++ inode = d_inode(inode->i_sb->s_root); ++ inode_lock(inode); + ++ /* ++ * In order to add new element or remove elements from the list ++ * via bm_{entry,register,status}_write() inode_lock() on the ++ * root inode must be held. ++ * The lock is exclusive ensuring that the list can't be ++ * modified. Only load_misc_binary() can access but does so ++ * read-only. So we only need to take the write lock when we ++ * actually remove the entry from the list. ++ */ + if (!list_empty(&e->list)) +- kill_node(e); ++ remove_binfmt_handler(e); + +- inode_unlock(d_inode(root)); ++ inode_unlock(inode); + break; + default: + return res; +@@ -682,13 +798,7 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer, + if (!inode) + goto out2; + +- err = simple_pin_fs(&bm_fs_type, &bm_mnt, &entry_count); +- if (err) { +- iput(inode); +- inode = NULL; +- goto out2; +- } +- ++ refcount_set(&e->users, 1); + e->dentry = dget(dentry); + inode->i_private = e; + inode->i_fop = &bm_entry_operations; +@@ -732,7 +842,8 @@ static ssize_t bm_status_write(struct file *file, const char __user *buffer, + size_t count, loff_t *ppos) + { + int res = parse_command(buffer, count); +- struct dentry *root; ++ Node *e, *next; ++ struct inode *inode; + + switch (res) { + case 1: +@@ -745,13 +856,22 @@ static ssize_t bm_status_write(struct file *file, const char __user *buffer, + break; + case 3: + /* Delete all handlers. */ +- root = file_inode(file)->i_sb->s_root; +- inode_lock(d_inode(root)); ++ inode = d_inode(file_inode(file)->i_sb->s_root); ++ inode_lock(inode); + +- while (!list_empty(&entries)) +- kill_node(list_first_entry(&entries, Node, list)); ++ /* ++ * In order to add new element or remove elements from the list ++ * via bm_{entry,register,status}_write() inode_lock() on the ++ * root inode must be held. ++ * The lock is exclusive ensuring that the list can't be ++ * modified. Only load_misc_binary() can access but does so ++ * read-only. So we only need to take the write lock when we ++ * actually remove the entry from the list. 
++ */ ++ list_for_each_entry_safe(e, next, &entries, list) ++ remove_binfmt_handler(e); + +- inode_unlock(d_inode(root)); ++ inode_unlock(inode); + break; + default: + return res; +diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c +index a815ce9cfb518..e6acf09a1507c 100644 +--- a/fs/btrfs/compression.c ++++ b/fs/btrfs/compression.c +@@ -140,16 +140,16 @@ static int compression_decompress_bio(struct list_head *ws, + } + + static int compression_decompress(int type, struct list_head *ws, +- const u8 *data_in, struct page *dest_page, +- unsigned long start_byte, size_t srclen, size_t destlen) ++ const u8 *data_in, struct page *dest_page, ++ unsigned long dest_pgoff, size_t srclen, size_t destlen) + { + switch (type) { + case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page, +- start_byte, srclen, destlen); ++ dest_pgoff, srclen, destlen); + case BTRFS_COMPRESS_LZO: return lzo_decompress(ws, data_in, dest_page, +- start_byte, srclen, destlen); ++ dest_pgoff, srclen, destlen); + case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page, +- start_byte, srclen, destlen); ++ dest_pgoff, srclen, destlen); + case BTRFS_COMPRESS_NONE: + default: + /* +@@ -941,14 +941,23 @@ static int btrfs_decompress_bio(struct compressed_bio *cb) + * start_byte tells us the offset into the compressed data we're interested in + */ + int btrfs_decompress(int type, const u8 *data_in, struct page *dest_page, +- unsigned long start_byte, size_t srclen, size_t destlen) ++ unsigned long dest_pgoff, size_t srclen, size_t destlen) + { ++ struct btrfs_fs_info *fs_info = btrfs_sb(dest_page->mapping->host->i_sb); + struct list_head *workspace; ++ const u32 sectorsize = fs_info->sectorsize; + int ret; + ++ /* ++ * The full destination page range should not exceed the page size. ++ * And the @destlen should not exceed sectorsize, as this is only called for ++ * inline file extents, which should not exceed sectorsize. ++ */ ++ ASSERT(dest_pgoff + destlen <= PAGE_SIZE && destlen <= sectorsize); ++ + workspace = get_workspace(type, 0); + ret = compression_decompress(type, workspace, data_in, dest_page, +- start_byte, srclen, destlen); ++ dest_pgoff, srclen, destlen); + put_workspace(type, workspace); + + return ret; +diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h +index 03bb9d143fa75..609865c940658 100644 +--- a/fs/btrfs/compression.h ++++ b/fs/btrfs/compression.h +@@ -143,7 +143,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping, + unsigned long *total_in, unsigned long *total_out); + int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb); + int zlib_decompress(struct list_head *ws, const u8 *data_in, +- struct page *dest_page, unsigned long start_byte, size_t srclen, ++ struct page *dest_page, unsigned long dest_pgoff, size_t srclen, + size_t destlen); + struct list_head *zlib_alloc_workspace(unsigned int level); + void zlib_free_workspace(struct list_head *ws); +diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c +index a2e614cf3c19c..e1475dfdf7a8b 100644 +--- a/fs/btrfs/defrag.c ++++ b/fs/btrfs/defrag.c +@@ -416,7 +416,7 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, + * keep_locks set and lowest_level is 1, regardless of the value of + * path->slots[1]. 
+ */ +- BUG_ON(path->locks[1] == 0); ++ ASSERT(path->locks[1] != 0); + ret = btrfs_realloc_node(trans, root, + path->nodes[1], 0, + &last_ret, +diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c +index 6d562f18d3f80..32c5f5a8a0e93 100644 +--- a/fs/btrfs/delayed-inode.c ++++ b/fs/btrfs/delayed-inode.c +@@ -425,8 +425,6 @@ static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item) + + delayed_root = delayed_node->root->fs_info->delayed_root; + +- BUG_ON(!delayed_root); +- + if (delayed_item->type == BTRFS_DELAYED_INSERTION_ITEM) + root = &delayed_node->ins_root; + else +@@ -975,7 +973,7 @@ static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node) + + if (delayed_node && + test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) { +- BUG_ON(!delayed_node->root); ++ ASSERT(delayed_node->root); + clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags); + delayed_node->count--; + +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index 1cc7e36c64c49..caedd4460d994 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -2800,6 +2800,7 @@ static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block + int ret; + + fs_info->sb = sb; ++ /* Temporary fixed values for block size until we read the superblock. */ + sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE; + sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE); + +@@ -3339,6 +3340,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device + sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super); + sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE); + ++ /* Update the values for the current filesystem. */ + sb->s_blocksize = sectorsize; + sb->s_blocksize_bits = blksize_bits(sectorsize); + memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE); +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c +index c6a95dfa59c81..b2ae50dcca0fe 100644 +--- a/fs/btrfs/extent_io.c ++++ b/fs/btrfs/extent_io.c +@@ -974,7 +974,7 @@ static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached, + int ret = 0; + size_t pg_offset = 0; + size_t iosize; +- size_t blocksize = inode->i_sb->s_blocksize; ++ size_t blocksize = fs_info->sectorsize; + struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; + + ret = set_page_extent_mapped(page); +@@ -2254,7 +2254,7 @@ int extent_invalidate_folio(struct extent_io_tree *tree, + struct extent_state *cached_state = NULL; + u64 start = folio_pos(folio); + u64 end = start + folio_size(folio) - 1; +- size_t blocksize = folio->mapping->host->i_sb->s_blocksize; ++ size_t blocksize = btrfs_sb(folio->mapping->host->i_sb)->sectorsize; + + /* This function is only called for the btree inode */ + ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO); +diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c +index 3e141c4dd2630..3bcf4a30cad77 100644 +--- a/fs/btrfs/free-space-cache.c ++++ b/fs/btrfs/free-space-cache.c +@@ -1910,9 +1910,9 @@ static inline void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, + ctl->free_space -= bytes; + } + +-static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl, +- struct btrfs_free_space *info, u64 offset, +- u64 bytes) ++static void btrfs_bitmap_set_bits(struct btrfs_free_space_ctl *ctl, ++ struct btrfs_free_space *info, u64 offset, ++ u64 bytes) + { + unsigned long start, count, end; + int extent_delta = 1; +@@ -2248,7 +2248,7 @@ static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl, + + bytes_to_set = 
min(end - offset, bytes); + +- bitmap_set_bits(ctl, info, offset, bytes_to_set); ++ btrfs_bitmap_set_bits(ctl, info, offset, bytes_to_set); + + return bytes_to_set; + +@@ -2696,15 +2696,16 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group, + u64 offset = bytenr - block_group->start; + u64 to_free, to_unusable; + int bg_reclaim_threshold = 0; +- bool initial = ((size == block_group->length) && (block_group->alloc_offset == 0)); ++ bool initial; + u64 reclaimable_unusable; + +- WARN_ON(!initial && offset + size > block_group->zone_capacity); ++ spin_lock(&block_group->lock); + ++ initial = ((size == block_group->length) && (block_group->alloc_offset == 0)); ++ WARN_ON(!initial && offset + size > block_group->zone_capacity); + if (!initial) + bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold); + +- spin_lock(&ctl->tree_lock); + if (!used) + to_free = size; + else if (initial) +@@ -2717,7 +2718,9 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group, + to_free = offset + size - block_group->alloc_offset; + to_unusable = size - to_free; + ++ spin_lock(&ctl->tree_lock); + ctl->free_space += to_free; ++ spin_unlock(&ctl->tree_lock); + /* + * If the block group is read-only, we should account freed space into + * bytes_readonly. +@@ -2726,11 +2729,8 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group, + block_group->zone_unusable += to_unusable; + WARN_ON(block_group->zone_unusable > block_group->length); + } +- spin_unlock(&ctl->tree_lock); + if (!used) { +- spin_lock(&block_group->lock); + block_group->alloc_offset -= size; +- spin_unlock(&block_group->lock); + } + + reclaimable_unusable = block_group->zone_unusable - +@@ -2744,6 +2744,8 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group, + btrfs_mark_bg_to_reclaim(block_group); + } + ++ spin_unlock(&block_group->lock); ++ + return 0; + } + +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index 5ddee801a8303..18ce5353092d7 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -730,7 +730,8 @@ static noinline int add_async_extent(struct async_chunk *cow, + struct async_extent *async_extent; + + async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS); +- BUG_ON(!async_extent); /* -ENOMEM */ ++ if (!async_extent) ++ return -ENOMEM; + async_extent->start = start; + async_extent->ram_size = ram_size; + async_extent->compressed_size = compressed_size; +@@ -1017,8 +1018,9 @@ static void compress_file_range(struct btrfs_work *work) + * The async work queues will take care of doing actual allocation on + * disk for these compressed pages, and will submit the bios. 
+ */ +- add_async_extent(async_chunk, start, total_in, total_compressed, pages, +- nr_pages, compress_type); ++ ret = add_async_extent(async_chunk, start, total_in, total_compressed, pages, ++ nr_pages, compress_type); ++ BUG_ON(ret); + if (start + total_in < end) { + start += total_in; + cond_resched(); +@@ -1030,8 +1032,9 @@ static void compress_file_range(struct btrfs_work *work) + if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && !inode->prop_compress) + inode->flags |= BTRFS_INODE_NOCOMPRESS; + cleanup_and_bail_uncompressed: +- add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0, +- BTRFS_COMPRESS_NONE); ++ ret = add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0, ++ BTRFS_COMPRESS_NONE); ++ BUG_ON(ret); + free_pages: + if (pages) { + for (i = 0; i < nr_pages; i++) { +@@ -4371,7 +4374,14 @@ static noinline int may_destroy_subvol(struct btrfs_root *root) + ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); + if (ret < 0) + goto out; +- BUG_ON(ret == 0); ++ if (ret == 0) { ++ /* ++ * Key with offset -1 found, there would have to exist a root ++ * with such id, but this is out of valid range. ++ */ ++ ret = -EUCLEAN; ++ goto out; ++ } + + ret = 0; + if (path->slots[0] > 0) { +@@ -8685,7 +8695,7 @@ static int btrfs_getattr(struct mnt_idmap *idmap, + u64 delalloc_bytes; + u64 inode_bytes; + struct inode *inode = d_inode(path->dentry); +- u32 blocksize = inode->i_sb->s_blocksize; ++ u32 blocksize = btrfs_sb(inode->i_sb)->sectorsize; + u32 bi_flags = BTRFS_I(inode)->flags; + u32 bi_ro_flags = BTRFS_I(inode)->ro_flags; + +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c +index 3f43a08613d8a..d5297523d4977 100644 +--- a/fs/btrfs/ioctl.c ++++ b/fs/btrfs/ioctl.c +@@ -528,7 +528,7 @@ static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info, + * block group is in the logical address space, which can be any + * sectorsize aligned bytenr in the range [0, U64_MAX]. 
+ */ +- if (range.len < fs_info->sb->s_blocksize) ++ if (range.len < fs_info->sectorsize) + return -EINVAL; + + range.minlen = max(range.minlen, minlen); +diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c +index 223dfbf009938..efe84be65a440 100644 +--- a/fs/btrfs/qgroup.c ++++ b/fs/btrfs/qgroup.c +@@ -2725,8 +2725,6 @@ int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr, + if (nr_old_roots == 0 && nr_new_roots == 0) + goto out_free; + +- BUG_ON(!fs_info->quota_root); +- + trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr, + num_bytes, nr_old_roots, nr_new_roots); + +diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c +index 65d2bd6910f2c..9f60aa79a8c50 100644 +--- a/fs/btrfs/reflink.c ++++ b/fs/btrfs/reflink.c +@@ -664,7 +664,7 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len, + struct inode *dst, u64 dst_loff) + { + struct btrfs_fs_info *fs_info = BTRFS_I(src)->root->fs_info; +- const u64 bs = fs_info->sb->s_blocksize; ++ const u64 bs = fs_info->sectorsize; + int ret; + + /* +@@ -731,7 +731,7 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src, + int ret; + int wb_ret; + u64 len = olen; +- u64 bs = fs_info->sb->s_blocksize; ++ u64 bs = fs_info->sectorsize; + + /* + * VFS's generic_remap_file_range_prep() protects us from cloning the +@@ -797,7 +797,7 @@ static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in, + { + struct inode *inode_in = file_inode(file_in); + struct inode *inode_out = file_inode(file_out); +- u64 bs = BTRFS_I(inode_out)->root->fs_info->sb->s_blocksize; ++ u64 bs = BTRFS_I(inode_out)->root->fs_info->sectorsize; + u64 wb_len; + int ret; + +diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c +index 651f0865bb0df..e912d8e411cc4 100644 +--- a/fs/btrfs/send.c ++++ b/fs/btrfs/send.c +@@ -777,7 +777,12 @@ static int begin_cmd(struct send_ctx *sctx, int cmd) + if (WARN_ON(!sctx->send_buf)) + return -EINVAL; + +- BUG_ON(sctx->send_size); ++ if (unlikely(sctx->send_size != 0)) { ++ btrfs_err(sctx->send_root->fs_info, ++ "send: command header buffer not empty cmd %d offset %llu", ++ cmd, sctx->send_off); ++ return -EINVAL; ++ } + + sctx->send_size += sizeof(*hdr); + hdr = (struct btrfs_cmd_header *)sctx->send_buf; +@@ -4190,7 +4195,13 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) + * This should never happen as the root dir always has the same ref + * which is always '..' 
+ */ +- BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID); ++ if (unlikely(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID)) { ++ btrfs_err(fs_info, ++ "send: unexpected inode %llu in process_recorded_refs()", ++ sctx->cur_ino); ++ ret = -EINVAL; ++ goto out; ++ } + + valid_path = fs_path_alloc(); + if (!valid_path) { +@@ -6148,26 +6159,52 @@ static int send_write_or_clone(struct send_ctx *sctx, + int ret = 0; + u64 offset = key->offset; + u64 end; +- u64 bs = sctx->send_root->fs_info->sb->s_blocksize; ++ u64 bs = sctx->send_root->fs_info->sectorsize; ++ struct btrfs_file_extent_item *ei; ++ u64 disk_byte; ++ u64 data_offset; ++ u64 num_bytes; ++ struct btrfs_inode_info info = { 0 }; + + end = min_t(u64, btrfs_file_extent_end(path), sctx->cur_inode_size); + if (offset >= end) + return 0; + +- if (clone_root && IS_ALIGNED(end, bs)) { +- struct btrfs_file_extent_item *ei; +- u64 disk_byte; +- u64 data_offset; ++ num_bytes = end - offset; + +- ei = btrfs_item_ptr(path->nodes[0], path->slots[0], +- struct btrfs_file_extent_item); +- disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei); +- data_offset = btrfs_file_extent_offset(path->nodes[0], ei); +- ret = clone_range(sctx, path, clone_root, disk_byte, +- data_offset, offset, end - offset); +- } else { +- ret = send_extent_data(sctx, path, offset, end - offset); +- } ++ if (!clone_root) ++ goto write_data; ++ ++ if (IS_ALIGNED(end, bs)) ++ goto clone_data; ++ ++ /* ++ * If the extent end is not aligned, we can clone if the extent ends at ++ * the i_size of the inode and the clone range ends at the i_size of the ++ * source inode, otherwise the clone operation fails with -EINVAL. ++ */ ++ if (end != sctx->cur_inode_size) ++ goto write_data; ++ ++ ret = get_inode_info(clone_root->root, clone_root->ino, &info); ++ if (ret < 0) ++ return ret; ++ ++ if (clone_root->offset + num_bytes == info.size) ++ goto clone_data; ++ ++write_data: ++ ret = send_extent_data(sctx, path, offset, num_bytes); ++ sctx->cur_inode_next_write_offset = end; ++ return ret; ++ ++clone_data: ++ ei = btrfs_item_ptr(path->nodes[0], path->slots[0], ++ struct btrfs_file_extent_item); ++ disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei); ++ data_offset = btrfs_file_extent_offset(path->nodes[0], ei); ++ ret = clone_range(sctx, path, clone_root, disk_byte, data_offset, offset, ++ num_bytes); + sctx->cur_inode_next_write_offset = end; + return ret; + } +@@ -7437,8 +7474,8 @@ static int tree_move_down(struct btrfs_path *path, int *level, u64 reada_min_gen + u64 reada_done = 0; + + lockdep_assert_held_read(&parent->fs_info->commit_root_sem); ++ ASSERT(*level != 0); + +- BUG_ON(*level == 0); + eb = btrfs_read_node_slot(parent, slot); + if (IS_ERR(eb)) + return PTR_ERR(eb); +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c +index de0bfebce1269..e33587a814098 100644 +--- a/fs/btrfs/super.c ++++ b/fs/btrfs/super.c +@@ -2124,7 +2124,7 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) + buf->f_bavail = 0; + + buf->f_type = BTRFS_SUPER_MAGIC; +- buf->f_bsize = dentry->d_sb->s_blocksize; ++ buf->f_bsize = fs_info->sectorsize; + buf->f_namelen = BTRFS_NAME_LEN; + + /* We treat it as constant endianness (it doesn't matter _which_) +diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c +index 1cc86af97dc6e..f4fd3fb7c887b 100644 +--- a/fs/btrfs/tests/extent-io-tests.c ++++ b/fs/btrfs/tests/extent-io-tests.c +@@ -11,6 +11,7 @@ + #include "btrfs-tests.h" + #include "../ctree.h" + #include "../extent_io.h" ++#include "../disk-io.h" + 
#include "../btrfs_inode.h" + + #define PROCESS_UNLOCK (1 << 0) +@@ -105,9 +106,11 @@ static void dump_extent_io_tree(const struct extent_io_tree *tree) + } + } + +-static int test_find_delalloc(u32 sectorsize) ++static int test_find_delalloc(u32 sectorsize, u32 nodesize) + { +- struct inode *inode; ++ struct btrfs_fs_info *fs_info; ++ struct btrfs_root *root = NULL; ++ struct inode *inode = NULL; + struct extent_io_tree *tmp; + struct page *page; + struct page *locked_page = NULL; +@@ -121,12 +124,27 @@ static int test_find_delalloc(u32 sectorsize) + + test_msg("running find delalloc tests"); + ++ fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize); ++ if (!fs_info) { ++ test_std_err(TEST_ALLOC_FS_INFO); ++ return -ENOMEM; ++ } ++ ++ root = btrfs_alloc_dummy_root(fs_info); ++ if (IS_ERR(root)) { ++ test_std_err(TEST_ALLOC_ROOT); ++ ret = PTR_ERR(root); ++ goto out; ++ } ++ + inode = btrfs_new_test_inode(); + if (!inode) { + test_std_err(TEST_ALLOC_INODE); +- return -ENOMEM; ++ ret = -ENOMEM; ++ goto out; + } + tmp = &BTRFS_I(inode)->io_tree; ++ BTRFS_I(inode)->root = root; + + /* + * Passing NULL as we don't have fs_info but tracepoints are not used +@@ -316,6 +334,8 @@ static int test_find_delalloc(u32 sectorsize) + process_page_range(inode, 0, total_dirty - 1, + PROCESS_UNLOCK | PROCESS_RELEASE); + iput(inode); ++ btrfs_free_dummy_root(root); ++ btrfs_free_dummy_fs_info(fs_info); + return ret; + } + +@@ -794,7 +814,7 @@ int btrfs_test_extent_io(u32 sectorsize, u32 nodesize) + + test_msg("running extent I/O tests"); + +- ret = test_find_delalloc(sectorsize); ++ ret = test_find_delalloc(sectorsize, nodesize); + if (ret) + goto out; + +diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c +index 5d6cfa618dc47..7ff61909648d7 100644 +--- a/fs/btrfs/tree-checker.c ++++ b/fs/btrfs/tree-checker.c +@@ -548,9 +548,10 @@ static int check_dir_item(struct extent_buffer *leaf, + + /* dir type check */ + dir_type = btrfs_dir_ftype(leaf, di); +- if (unlikely(dir_type >= BTRFS_FT_MAX)) { ++ if (unlikely(dir_type <= BTRFS_FT_UNKNOWN || ++ dir_type >= BTRFS_FT_MAX)) { + dir_item_err(leaf, slot, +- "invalid dir item type, have %u expect [0, %u)", ++ "invalid dir item type, have %u expect (0, %u)", + dir_type, BTRFS_FT_MAX); + return -EUCLEAN; + } +@@ -1670,6 +1671,72 @@ static int check_inode_ref(struct extent_buffer *leaf, + return 0; + } + ++static int check_dev_extent_item(const struct extent_buffer *leaf, ++ const struct btrfs_key *key, ++ int slot, ++ struct btrfs_key *prev_key) ++{ ++ struct btrfs_dev_extent *de; ++ const u32 sectorsize = leaf->fs_info->sectorsize; ++ ++ de = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent); ++ /* Basic fixed member checks. */ ++ if (unlikely(btrfs_dev_extent_chunk_tree(leaf, de) != ++ BTRFS_CHUNK_TREE_OBJECTID)) { ++ generic_err(leaf, slot, ++ "invalid dev extent chunk tree id, has %llu expect %llu", ++ btrfs_dev_extent_chunk_tree(leaf, de), ++ BTRFS_CHUNK_TREE_OBJECTID); ++ return -EUCLEAN; ++ } ++ if (unlikely(btrfs_dev_extent_chunk_objectid(leaf, de) != ++ BTRFS_FIRST_CHUNK_TREE_OBJECTID)) { ++ generic_err(leaf, slot, ++ "invalid dev extent chunk objectid, has %llu expect %llu", ++ btrfs_dev_extent_chunk_objectid(leaf, de), ++ BTRFS_FIRST_CHUNK_TREE_OBJECTID); ++ return -EUCLEAN; ++ } ++ /* Alignment check. 
*/ ++ if (unlikely(!IS_ALIGNED(key->offset, sectorsize))) { ++ generic_err(leaf, slot, ++ "invalid dev extent key.offset, has %llu not aligned to %u", ++ key->offset, sectorsize); ++ return -EUCLEAN; ++ } ++ if (unlikely(!IS_ALIGNED(btrfs_dev_extent_chunk_offset(leaf, de), ++ sectorsize))) { ++ generic_err(leaf, slot, ++ "invalid dev extent chunk offset, has %llu not aligned to %u", ++ btrfs_dev_extent_chunk_objectid(leaf, de), ++ sectorsize); ++ return -EUCLEAN; ++ } ++ if (unlikely(!IS_ALIGNED(btrfs_dev_extent_length(leaf, de), ++ sectorsize))) { ++ generic_err(leaf, slot, ++ "invalid dev extent length, has %llu not aligned to %u", ++ btrfs_dev_extent_length(leaf, de), sectorsize); ++ return -EUCLEAN; ++ } ++ /* Overlap check with previous dev extent. */ ++ if (slot && prev_key->objectid == key->objectid && ++ prev_key->type == key->type) { ++ struct btrfs_dev_extent *prev_de; ++ u64 prev_len; ++ ++ prev_de = btrfs_item_ptr(leaf, slot - 1, struct btrfs_dev_extent); ++ prev_len = btrfs_dev_extent_length(leaf, prev_de); ++ if (unlikely(prev_key->offset + prev_len > key->offset)) { ++ generic_err(leaf, slot, ++ "dev extent overlap, prev offset %llu len %llu current offset %llu", ++ prev_key->objectid, prev_len, key->offset); ++ return -EUCLEAN; ++ } ++ } ++ return 0; ++} ++ + /* + * Common point to switch the item-specific validation. + */ +@@ -1706,6 +1773,9 @@ static enum btrfs_tree_block_status check_leaf_item(struct extent_buffer *leaf, + case BTRFS_DEV_ITEM_KEY: + ret = check_dev_item(leaf, key, slot); + break; ++ case BTRFS_DEV_EXTENT_KEY: ++ ret = check_dev_extent_item(leaf, key, slot, prev_key); ++ break; + case BTRFS_INODE_ITEM_KEY: + ret = check_inode_item(leaf, key, slot); + break; +diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c +index 6c231a116a29c..9f60d0bbd5306 100644 +--- a/fs/btrfs/zlib.c ++++ b/fs/btrfs/zlib.c +@@ -354,18 +354,13 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb) + } + + int zlib_decompress(struct list_head *ws, const u8 *data_in, +- struct page *dest_page, unsigned long start_byte, size_t srclen, ++ struct page *dest_page, unsigned long dest_pgoff, size_t srclen, + size_t destlen) + { + struct workspace *workspace = list_entry(ws, struct workspace, list); + int ret = 0; + int wbits = MAX_WBITS; +- unsigned long bytes_left; +- unsigned long total_out = 0; +- unsigned long pg_offset = 0; +- +- destlen = min_t(unsigned long, destlen, PAGE_SIZE); +- bytes_left = destlen; ++ unsigned long to_copy; + + workspace->strm.next_in = data_in; + workspace->strm.avail_in = srclen; +@@ -390,60 +385,30 @@ int zlib_decompress(struct list_head *ws, const u8 *data_in, + return -EIO; + } + +- while (bytes_left > 0) { +- unsigned long buf_start; +- unsigned long buf_offset; +- unsigned long bytes; +- +- ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH); +- if (ret != Z_OK && ret != Z_STREAM_END) +- break; +- +- buf_start = total_out; +- total_out = workspace->strm.total_out; +- +- if (total_out == buf_start) { +- ret = -EIO; +- break; +- } +- +- if (total_out <= start_byte) +- goto next; +- +- if (total_out > start_byte && buf_start < start_byte) +- buf_offset = start_byte - buf_start; +- else +- buf_offset = 0; +- +- bytes = min(PAGE_SIZE - pg_offset, +- PAGE_SIZE - (buf_offset % PAGE_SIZE)); +- bytes = min(bytes, bytes_left); ++ /* ++ * Everything (in/out buf) should be at most one sector, there should ++ * be no need to switch any input/output buffer. 
++ */
++ ret = zlib_inflate(&workspace->strm, Z_FINISH);
++ to_copy = min(workspace->strm.total_out, destlen);
++ if (ret != Z_STREAM_END)
++ goto out;
+
+- memcpy_to_page(dest_page, pg_offset,
+- workspace->buf + buf_offset, bytes);
++ memcpy_to_page(dest_page, dest_pgoff, workspace->buf, to_copy);
+
+- pg_offset += bytes;
+- bytes_left -= bytes;
+-next:
+- workspace->strm.next_out = workspace->buf;
+- workspace->strm.avail_out = workspace->buf_size;
+- }
+-
+- if (ret != Z_STREAM_END && bytes_left != 0)
++out:
++ if (unlikely(to_copy != destlen)) {
++ pr_warn_ratelimited("BTRFS: inflate failed, decompressed=%lu expected=%zu\n",
++ to_copy, destlen);
+ ret = -EIO;
+- else
++ } else {
+ ret = 0;
++ }
+
+ zlib_inflateEnd(&workspace->strm);
+
+- /*
+- * this should only happen if zlib returned fewer bytes than we
+- * expected. btrfs_get_block is responsible for zeroing from the
+- * end of the inline extent (destlen) to the end of the page
+- */
+- if (pg_offset < destlen) {
+- memzero_page(dest_page, pg_offset, destlen - pg_offset);
+- }
++ if (unlikely(to_copy < destlen))
++ memzero_page(dest_page, dest_pgoff + to_copy, destlen - to_copy);
+ return ret;
+ }
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index d393df22431a0..448e0ea49b31d 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3402,9 +3402,10 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
+ struct ext4_extent *ex, *abut_ex;
+ ext4_lblk_t ee_block, eof_block;
+ unsigned int ee_len, depth, map_len = map->m_len;
+- int allocated = 0, max_zeroout = 0;
+ int err = 0;
+ int split_flag = EXT4_EXT_DATA_VALID2;
++ int allocated = 0;
++ unsigned int max_zeroout = 0;
+
+ ext_debug(inode, "logical block %llu, max_blocks %u\n",
+ (unsigned long long)map->m_lblk, map_len);
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index f55ab800a7539..870397f3de559 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -6953,6 +6953,9 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
+ bool set_trimmed = false;
+ void *bitmap;
+
++ if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
++ return 0;
++
+ last = ext4_last_grp_cluster(sb, e4b->bd_group);
+ bitmap = e4b->bd_bitmap;
+ if (start == 0 && max >= last)
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index 804958c6de34c..e3e2c0b2f4959 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -2398,6 +2398,8 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
+ #endif
+
+ segno = GET_SEGNO(sbi, blkaddr);
++ if (segno == NULL_SEGNO)
++ return;
+
+ se = get_seg_entry(sbi, segno);
+ new_vblocks = se->valid_blocks + del;
+@@ -2646,6 +2648,7 @@ static void get_new_segment(struct f2fs_sb_info *sbi,
+ unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
+ bool init = true;
+ int i;
++ int ret = 0;
+
+ spin_lock(&free_i->segmap_lock);
+
+@@ -2670,7 +2673,10 @@ static void get_new_segment(struct f2fs_sb_info *sbi,
+ if (secno >= MAIN_SECS(sbi)) {
+ secno = find_first_zero_bit(free_i->free_secmap,
+ MAIN_SECS(sbi));
+- f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
++ if (secno >= MAIN_SECS(sbi)) {
++ ret = -ENOSPC;
++ goto out_unlock;
++ }
+ }
+ segno = GET_SEG_FROM_SEC(sbi, secno);
+ zoneno = GET_ZONE_FROM_SEC(sbi, secno);
+@@ -2700,7 +2706,13 @@ static void get_new_segment(struct f2fs_sb_info *sbi,
+ f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
+ __set_inuse(sbi, segno);
+ *newseg = segno;
++out_unlock:
+ spin_unlock(&free_i->segmap_lock);
++
++ if (ret) {
++ f2fs_stop_checkpoint(sbi, false,
STOP_CP_REASON_NO_SEGMENT); ++ f2fs_bug_on(sbi, 1); ++ } + } + + static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified) +@@ -3471,8 +3483,7 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, + * since SSR needs latest valid block information. + */ + update_sit_entry(sbi, *new_blkaddr, 1); +- if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) +- update_sit_entry(sbi, old_blkaddr, -1); ++ update_sit_entry(sbi, old_blkaddr, -1); + + /* + * If the current segment is full, flush it out and replace it with a +diff --git a/fs/file.c b/fs/file.c +index b64a84137c00f..ca984655a187f 100644 +--- a/fs/file.c ++++ b/fs/file.c +@@ -46,27 +46,23 @@ static void free_fdtable_rcu(struct rcu_head *rcu) + #define BITBIT_NR(nr) BITS_TO_LONGS(BITS_TO_LONGS(nr)) + #define BITBIT_SIZE(nr) (BITBIT_NR(nr) * sizeof(long)) + ++#define fdt_words(fdt) ((fdt)->max_fds / BITS_PER_LONG) // words in ->open_fds + /* + * Copy 'count' fd bits from the old table to the new table and clear the extra + * space if any. This does not copy the file pointers. Called with the files + * spinlock held for write. + */ +-static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt, +- unsigned int count) ++static inline void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt, ++ unsigned int copy_words) + { +- unsigned int cpy, set; +- +- cpy = count / BITS_PER_BYTE; +- set = (nfdt->max_fds - count) / BITS_PER_BYTE; +- memcpy(nfdt->open_fds, ofdt->open_fds, cpy); +- memset((char *)nfdt->open_fds + cpy, 0, set); +- memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy); +- memset((char *)nfdt->close_on_exec + cpy, 0, set); +- +- cpy = BITBIT_SIZE(count); +- set = BITBIT_SIZE(nfdt->max_fds) - cpy; +- memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy); +- memset((char *)nfdt->full_fds_bits + cpy, 0, set); ++ unsigned int nwords = fdt_words(nfdt); ++ ++ bitmap_copy_and_extend(nfdt->open_fds, ofdt->open_fds, ++ copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG); ++ bitmap_copy_and_extend(nfdt->close_on_exec, ofdt->close_on_exec, ++ copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG); ++ bitmap_copy_and_extend(nfdt->full_fds_bits, ofdt->full_fds_bits, ++ copy_words, nwords); + } + + /* +@@ -84,7 +80,7 @@ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt) + memcpy(nfdt->fd, ofdt->fd, cpy); + memset((char *)nfdt->fd + cpy, 0, set); + +- copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds); ++ copy_fd_bitmaps(nfdt, ofdt, fdt_words(ofdt)); + } + + /* +@@ -374,7 +370,7 @@ struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int + open_files = sane_fdtable_size(old_fdt, max_fds); + } + +- copy_fd_bitmaps(new_fdt, old_fdt, open_files); ++ copy_fd_bitmaps(new_fdt, old_fdt, open_files / BITS_PER_LONG); + + old_fds = old_fdt->fd; + new_fds = new_fdt->fd; +diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c +index bce2492186d0b..d4d4b3a8b1060 100644 +--- a/fs/fscache/cookie.c ++++ b/fs/fscache/cookie.c +@@ -741,6 +741,10 @@ static void fscache_cookie_state_machine(struct fscache_cookie *cookie) + spin_lock(&cookie->lock); + } + if (test_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags)) { ++ if (atomic_read(&cookie->n_accesses) != 0) ++ /* still being accessed: postpone it */ ++ break; ++ + __fscache_set_cookie_state(cookie, + FSCACHE_COOKIE_STATE_LRU_DISCARDING); + wake = true; +diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c +index 91e89e68177ee..b6cad106c37e4 100644 +--- a/fs/fuse/cuse.c ++++ b/fs/fuse/cuse.c +@@ -474,8 +474,7 @@ static int 
cuse_send_init(struct cuse_conn *cc) + + static void cuse_fc_release(struct fuse_conn *fc) + { +- struct cuse_conn *cc = fc_to_cc(fc); +- kfree_rcu(cc, fc.rcu); ++ kfree(fc_to_cc(fc)); + } + + /** +diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c +index 1a8f82f478cb7..8573d79ef29c8 100644 +--- a/fs/fuse/dev.c ++++ b/fs/fuse/dev.c +@@ -1618,9 +1618,11 @@ static int fuse_notify_store(struct fuse_conn *fc, unsigned int size, + + this_num = min_t(unsigned, num, PAGE_SIZE - offset); + err = fuse_copy_page(cs, &page, offset, this_num, 0); +- if (!err && offset == 0 && +- (this_num == PAGE_SIZE || file_size == end)) ++ if (!PageUptodate(page) && !err && offset == 0 && ++ (this_num == PAGE_SIZE || file_size == end)) { ++ zero_user_segment(page, this_num, PAGE_SIZE); + SetPageUptodate(page); ++ } + unlock_page(page); + put_page(page); + +diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h +index 3e65cdc946316..4ce1a6fdc94f0 100644 +--- a/fs/fuse/fuse_i.h ++++ b/fs/fuse/fuse_i.h +@@ -888,6 +888,7 @@ struct fuse_mount { + + /* Entry on fc->mounts */ + struct list_head fc_entry; ++ struct rcu_head rcu; + }; + + static inline struct fuse_mount *get_fuse_mount_super(struct super_block *sb) +diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c +index 04c7ba3ea03a2..735abf426a064 100644 +--- a/fs/fuse/inode.c ++++ b/fs/fuse/inode.c +@@ -952,6 +952,14 @@ void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm, + } + EXPORT_SYMBOL_GPL(fuse_conn_init); + ++static void delayed_release(struct rcu_head *p) ++{ ++ struct fuse_conn *fc = container_of(p, struct fuse_conn, rcu); ++ ++ put_user_ns(fc->user_ns); ++ fc->release(fc); ++} ++ + void fuse_conn_put(struct fuse_conn *fc) + { + if (refcount_dec_and_test(&fc->count)) { +@@ -963,13 +971,12 @@ void fuse_conn_put(struct fuse_conn *fc) + if (fiq->ops->release) + fiq->ops->release(fiq); + put_pid_ns(fc->pid_ns); +- put_user_ns(fc->user_ns); + bucket = rcu_dereference_protected(fc->curr_bucket, 1); + if (bucket) { + WARN_ON(atomic_read(&bucket->count) != 1); + kfree(bucket); + } +- fc->release(fc); ++ call_rcu(&fc->rcu, delayed_release); + } + } + EXPORT_SYMBOL_GPL(fuse_conn_put); +@@ -1387,7 +1394,7 @@ EXPORT_SYMBOL_GPL(fuse_send_init); + void fuse_free_conn(struct fuse_conn *fc) + { + WARN_ON(!list_empty(&fc->devices)); +- kfree_rcu(fc, rcu); ++ kfree(fc); + } + EXPORT_SYMBOL_GPL(fuse_free_conn); + +@@ -1921,7 +1928,7 @@ static void fuse_sb_destroy(struct super_block *sb) + void fuse_mount_destroy(struct fuse_mount *fm) + { + fuse_conn_put(fm->fc); +- kfree(fm); ++ kfree_rcu(fm, rcu); + } + EXPORT_SYMBOL(fuse_mount_destroy); + +diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c +index 5f1be1da92ce9..d84dacbdce2c9 100644 +--- a/fs/fuse/virtio_fs.c ++++ b/fs/fuse/virtio_fs.c +@@ -323,6 +323,16 @@ static int virtio_fs_read_tag(struct virtio_device *vdev, struct virtio_fs *fs) + return -ENOMEM; + memcpy(fs->tag, tag_buf, len); + fs->tag[len] = '\0'; ++ ++ /* While the VIRTIO specification allows any character, newlines are ++ * awkward on mount(8) command-lines and cause problems in the sysfs ++ * "tag" attr and uevent TAG= properties. Forbid them. 
++ */ ++ if (strchr(fs->tag, '\n')) { ++ dev_dbg(&vdev->dev, "refusing virtiofs tag with newline character\n"); ++ return -EINVAL; ++ } ++ + return 0; + } + +diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c +index 3de0d8ab42eaf..29085643ad104 100644 +--- a/fs/gfs2/inode.c ++++ b/fs/gfs2/inode.c +@@ -1929,7 +1929,7 @@ static int setattr_chown(struct inode *inode, struct iattr *attr) + kuid_t ouid, nuid; + kgid_t ogid, ngid; + int error; +- struct gfs2_alloc_parms ap; ++ struct gfs2_alloc_parms ap = {}; + + ouid = inode->i_uid; + ogid = inode->i_gid; +diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c +index 8b34c6cf9293f..1200cb8059995 100644 +--- a/fs/gfs2/super.c ++++ b/fs/gfs2/super.c +@@ -819,6 +819,7 @@ static int gfs2_thaw_super(struct super_block *sb, enum freeze_holder who) + if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags)) + goto out; + ++ atomic_inc(&sb->s_active); + gfs2_freeze_unlock(&sdp->sd_freeze_gh); + + error = gfs2_do_thaw(sdp); +@@ -829,6 +830,7 @@ static int gfs2_thaw_super(struct super_block *sb, enum freeze_holder who) + } + out: + mutex_unlock(&sdp->sd_freeze_mutex); ++ deactivate_super(sb); + return error; + } + +diff --git a/fs/inode.c b/fs/inode.c +index ae1a6410b53d7..776c2a049d07a 100644 +--- a/fs/inode.c ++++ b/fs/inode.c +@@ -486,6 +486,39 @@ static void inode_lru_list_del(struct inode *inode) + this_cpu_dec(nr_unused); + } + ++static void inode_pin_lru_isolating(struct inode *inode) ++{ ++ lockdep_assert_held(&inode->i_lock); ++ WARN_ON(inode->i_state & (I_LRU_ISOLATING | I_FREEING | I_WILL_FREE)); ++ inode->i_state |= I_LRU_ISOLATING; ++} ++ ++static void inode_unpin_lru_isolating(struct inode *inode) ++{ ++ spin_lock(&inode->i_lock); ++ WARN_ON(!(inode->i_state & I_LRU_ISOLATING)); ++ inode->i_state &= ~I_LRU_ISOLATING; ++ smp_mb(); ++ wake_up_bit(&inode->i_state, __I_LRU_ISOLATING); ++ spin_unlock(&inode->i_lock); ++} ++ ++static void inode_wait_for_lru_isolating(struct inode *inode) ++{ ++ spin_lock(&inode->i_lock); ++ if (inode->i_state & I_LRU_ISOLATING) { ++ DEFINE_WAIT_BIT(wq, &inode->i_state, __I_LRU_ISOLATING); ++ wait_queue_head_t *wqh; ++ ++ wqh = bit_waitqueue(&inode->i_state, __I_LRU_ISOLATING); ++ spin_unlock(&inode->i_lock); ++ __wait_on_bit(wqh, &wq, bit_wait, TASK_UNINTERRUPTIBLE); ++ spin_lock(&inode->i_lock); ++ WARN_ON(inode->i_state & I_LRU_ISOLATING); ++ } ++ spin_unlock(&inode->i_lock); ++} ++ + /** + * inode_sb_list_add - add inode to the superblock list of inodes + * @inode: inode to add +@@ -654,6 +687,8 @@ static void evict(struct inode *inode) + + inode_sb_list_del(inode); + ++ inode_wait_for_lru_isolating(inode); ++ + /* + * Wait for flusher thread to be done with the inode so that filesystem + * does not start destroying it while writeback is still running. Since +@@ -842,7 +877,7 @@ static enum lru_status inode_lru_isolate(struct list_head *item, + * be under pressure before the cache inside the highmem zone. 
+ */ + if (inode_has_buffers(inode) || !mapping_empty(&inode->i_data)) { +- __iget(inode); ++ inode_pin_lru_isolating(inode); + spin_unlock(&inode->i_lock); + spin_unlock(lru_lock); + if (remove_inode_buffers(inode)) { +@@ -854,7 +889,7 @@ static enum lru_status inode_lru_isolate(struct list_head *item, + __count_vm_events(PGINODESTEAL, reap); + mm_account_reclaimed_pages(reap); + } +- iput(inode); ++ inode_unpin_lru_isolating(inode); + spin_lock(lru_lock); + return LRU_RETRY; + } +diff --git a/fs/jfs/jfs_dinode.h b/fs/jfs/jfs_dinode.h +index 6b231d0d0071b..603aae17a6934 100644 +--- a/fs/jfs/jfs_dinode.h ++++ b/fs/jfs/jfs_dinode.h +@@ -96,7 +96,7 @@ struct dinode { + #define di_gengen u._file._u1._imap._gengen + + union { +- xtpage_t _xtroot; ++ xtroot_t _xtroot; + struct { + u8 unused[16]; /* 16: */ + dxd_t _dxd; /* 16: */ +diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c +index 4bc589c4dcca9..82d88dcf0ea6b 100644 +--- a/fs/jfs/jfs_imap.c ++++ b/fs/jfs/jfs_imap.c +@@ -673,7 +673,7 @@ int diWrite(tid_t tid, struct inode *ip) + * This is the special xtree inside the directory for storing + * the directory table + */ +- xtpage_t *p, *xp; ++ xtroot_t *p, *xp; + xad_t *xad; + + jfs_ip->xtlid = 0; +@@ -687,7 +687,7 @@ int diWrite(tid_t tid, struct inode *ip) + * copy xtree root from inode to dinode: + */ + p = &jfs_ip->i_xtroot; +- xp = (xtpage_t *) &dp->di_dirtable; ++ xp = (xtroot_t *) &dp->di_dirtable; + lv = ilinelock->lv; + for (n = 0; n < ilinelock->index; n++, lv++) { + memcpy(&xp->xad[lv->offset], &p->xad[lv->offset], +@@ -716,7 +716,7 @@ int diWrite(tid_t tid, struct inode *ip) + * regular file: 16 byte (XAD slot) granularity + */ + if (type & tlckXTREE) { +- xtpage_t *p, *xp; ++ xtroot_t *p, *xp; + xad_t *xad; + + /* +diff --git a/fs/jfs/jfs_incore.h b/fs/jfs/jfs_incore.h +index d3c35068cb761..10934f9a11be3 100644 +--- a/fs/jfs/jfs_incore.h ++++ b/fs/jfs/jfs_incore.h +@@ -66,7 +66,7 @@ struct jfs_inode_info { + lid_t xtlid; /* lid of xtree lock on directory */ + union { + struct { +- xtpage_t _xtroot; /* 288: xtree root */ ++ xtroot_t _xtroot; /* 288: xtree root */ + struct inomap *_imap; /* 4: inode map header */ + } file; + struct { +diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c +index ce4b4760fcb1d..dccc8b3f10459 100644 +--- a/fs/jfs/jfs_txnmgr.c ++++ b/fs/jfs/jfs_txnmgr.c +@@ -783,7 +783,7 @@ struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage * mp, + if (mp->xflag & COMMIT_PAGE) + p = (xtpage_t *) mp->data; + else +- p = &jfs_ip->i_xtroot; ++ p = (xtpage_t *) &jfs_ip->i_xtroot; + xtlck->lwm.offset = + le16_to_cpu(p->header.nextindex); + } +@@ -1676,7 +1676,7 @@ static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd, + + if (tlck->type & tlckBTROOT) { + lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT); +- p = &JFS_IP(ip)->i_xtroot; ++ p = (xtpage_t *) &JFS_IP(ip)->i_xtroot; + if (S_ISDIR(ip->i_mode)) + lrd->log.redopage.type |= + cpu_to_le16(LOG_DIR_XTREE); +diff --git a/fs/jfs/jfs_xtree.c b/fs/jfs/jfs_xtree.c +index 2d304cee884c6..5ee618d17e773 100644 +--- a/fs/jfs/jfs_xtree.c ++++ b/fs/jfs/jfs_xtree.c +@@ -1213,7 +1213,7 @@ xtSplitRoot(tid_t tid, + struct xtlock *xtlck; + int rc; + +- sp = &JFS_IP(ip)->i_xtroot; ++ sp = (xtpage_t *) &JFS_IP(ip)->i_xtroot; + + INCREMENT(xtStat.split); + +@@ -2098,7 +2098,7 @@ int xtAppend(tid_t tid, /* transaction id */ + */ + void xtInitRoot(tid_t tid, struct inode *ip) + { +- xtpage_t *p; ++ xtroot_t *p; + + /* + * acquire a transaction lock on the root +diff --git a/fs/jfs/jfs_xtree.h 
b/fs/jfs/jfs_xtree.h +index ad7592191d760..0f6cf5a1ce75b 100644 +--- a/fs/jfs/jfs_xtree.h ++++ b/fs/jfs/jfs_xtree.h +@@ -65,24 +65,33 @@ struct xadlist { + #define XTPAGEMAXSLOT 256 + #define XTENTRYSTART 2 + +-/* +- * xtree page: +- */ +-typedef union { +- struct xtheader { +- __le64 next; /* 8: */ +- __le64 prev; /* 8: */ ++struct xtheader { ++ __le64 next; /* 8: */ ++ __le64 prev; /* 8: */ + +- u8 flag; /* 1: */ +- u8 rsrvd1; /* 1: */ +- __le16 nextindex; /* 2: next index = number of entries */ +- __le16 maxentry; /* 2: max number of entries */ +- __le16 rsrvd2; /* 2: */ ++ u8 flag; /* 1: */ ++ u8 rsrvd1; /* 1: */ ++ __le16 nextindex; /* 2: next index = number of entries */ ++ __le16 maxentry; /* 2: max number of entries */ ++ __le16 rsrvd2; /* 2: */ + +- pxd_t self; /* 8: self */ +- } header; /* (32) */ ++ pxd_t self; /* 8: self */ ++}; + ++/* ++ * xtree root (in inode): ++ */ ++typedef union { ++ struct xtheader header; + xad_t xad[XTROOTMAXSLOT]; /* 16 * maxentry: xad array */ ++} xtroot_t; ++ ++/* ++ * xtree page: ++ */ ++typedef union { ++ struct xtheader header; ++ xad_t xad[XTPAGEMAXSLOT]; /* 16 * maxentry: xad array */ + } xtpage_t; + + /* +diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c +index 180906c36f515..332d08d2fe0d5 100644 +--- a/fs/kernfs/file.c ++++ b/fs/kernfs/file.c +@@ -532,9 +532,11 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma) + goto out_put; + + rc = 0; +- of->mmapped = true; +- of_on(of)->nr_mmapped++; +- of->vm_ops = vma->vm_ops; ++ if (!of->mmapped) { ++ of->mmapped = true; ++ of_on(of)->nr_mmapped++; ++ of->vm_ops = vma->vm_ops; ++ } + vma->vm_ops = &kernfs_vm_ops; + out_put: + kernfs_put_active(of->kn); +diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c +index 9084f156d67bf..664d3128e730c 100644 +--- a/fs/nfs/pnfs.c ++++ b/fs/nfs/pnfs.c +@@ -1997,6 +1997,14 @@ pnfs_update_layout(struct inode *ino, + } + + lookup_again: ++ if (!nfs4_valid_open_stateid(ctx->state)) { ++ trace_pnfs_update_layout(ino, pos, count, ++ iomode, lo, lseg, ++ PNFS_UPDATE_LAYOUT_INVALID_OPEN); ++ lseg = ERR_PTR(-EIO); ++ goto out; ++ } ++ + lseg = ERR_PTR(nfs4_client_recover_expired_lease(clp)); + if (IS_ERR(lseg)) + goto out; +diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c +index 7911c4b3b5d35..710a54c7dffc5 100644 +--- a/fs/nfsd/nfssvc.c ++++ b/fs/nfsd/nfssvc.c +@@ -567,7 +567,6 @@ void nfsd_last_thread(struct net *net) + return; + + nfsd_shutdown_net(net); +- pr_info("nfsd: last server has exited, flushing export cache\n"); + nfsd_export_flush(net); + } + +@@ -783,7 +782,6 @@ int + nfsd_svc(int nrservs, struct net *net, const struct cred *cred) + { + int error; +- bool nfsd_up_before; + struct nfsd_net *nn = net_generic(net, nfsd_net_id); + struct svc_serv *serv; + +@@ -803,8 +801,6 @@ nfsd_svc(int nrservs, struct net *net, const struct cred *cred) + error = nfsd_create_serv(net); + if (error) + goto out; +- +- nfsd_up_before = nn->nfsd_net_up; + serv = nn->nfsd_serv; + + error = nfsd_startup_net(net, cred); +@@ -812,17 +808,15 @@ nfsd_svc(int nrservs, struct net *net, const struct cred *cred) + goto out_put; + error = svc_set_num_threads(serv, NULL, nrservs); + if (error) +- goto out_shutdown; ++ goto out_put; + error = serv->sv_nrthreads; +- if (error == 0) +- nfsd_last_thread(net); +-out_shutdown: +- if (error < 0 && !nfsd_up_before) +- nfsd_shutdown_net(net); + out_put: + /* Threads now hold service active */ + if (xchg(&nn->keep_active, 0)) + svc_put(serv); ++ ++ if (serv->sv_nrthreads == 0) ++ nfsd_last_thread(net); + svc_put(serv); + out: + 
mutex_unlock(&nfsd_mutex); +diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c +index 931a7744d1865..cf4fe21a50399 100644 +--- a/fs/ntfs3/bitmap.c ++++ b/fs/ntfs3/bitmap.c +@@ -654,7 +654,7 @@ int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits) + wnd->total_zeroes = nbits; + wnd->extent_max = MINUS_ONE_T; + wnd->zone_bit = wnd->zone_end = 0; +- wnd->nwnd = bytes_to_block(sb, bitmap_size(nbits)); ++ wnd->nwnd = bytes_to_block(sb, ntfs3_bitmap_size(nbits)); + wnd->bits_last = nbits & (wbits - 1); + if (!wnd->bits_last) + wnd->bits_last = wbits; +@@ -1347,7 +1347,7 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits) + return -EINVAL; + + /* Align to 8 byte boundary. */ +- new_wnd = bytes_to_block(sb, bitmap_size(new_bits)); ++ new_wnd = bytes_to_block(sb, ntfs3_bitmap_size(new_bits)); + new_last = new_bits & (wbits - 1); + if (!new_last) + new_last = wbits; +diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c +index c66b0eab6a160..e19b13db4f91e 100644 +--- a/fs/ntfs3/fsntfs.c ++++ b/fs/ntfs3/fsntfs.c +@@ -522,7 +522,7 @@ static int ntfs_extend_mft(struct ntfs_sb_info *sbi) + ni->mi.dirty = true; + + /* Step 2: Resize $MFT::BITMAP. */ +- new_bitmap_bytes = bitmap_size(new_mft_total); ++ new_bitmap_bytes = ntfs3_bitmap_size(new_mft_total); + + err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run, + new_bitmap_bytes, &new_bitmap_bytes, true, NULL); +diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c +index 0d8a96136b084..9089c58a005ce 100644 +--- a/fs/ntfs3/index.c ++++ b/fs/ntfs3/index.c +@@ -1456,8 +1456,8 @@ static int indx_create_allocate(struct ntfs_index *indx, struct ntfs_inode *ni, + + alloc->nres.valid_size = alloc->nres.data_size = cpu_to_le64(data_size); + +- err = ni_insert_resident(ni, bitmap_size(1), ATTR_BITMAP, in->name, +- in->name_len, &bitmap, NULL, NULL); ++ err = ni_insert_resident(ni, ntfs3_bitmap_size(1), ATTR_BITMAP, ++ in->name, in->name_len, &bitmap, NULL, NULL); + if (err) + goto out2; + +@@ -1518,8 +1518,9 @@ static int indx_add_allocate(struct ntfs_index *indx, struct ntfs_inode *ni, + if (bmp) { + /* Increase bitmap. */ + err = attr_set_size(ni, ATTR_BITMAP, in->name, in->name_len, +- &indx->bitmap_run, bitmap_size(bit + 1), +- NULL, true, NULL); ++ &indx->bitmap_run, ++ ntfs3_bitmap_size(bit + 1), NULL, true, ++ NULL); + if (err) + goto out1; + } +@@ -2098,7 +2099,7 @@ static int indx_shrink(struct ntfs_index *indx, struct ntfs_inode *ni, + if (in->name == I30_NAME) + i_size_write(&ni->vfs_inode, new_data); + +- bpb = bitmap_size(bit); ++ bpb = ntfs3_bitmap_size(bit); + if (bpb * 8 == nbits) + return 0; + +diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h +index 1ca40c1d966b8..28788cf6ba407 100644 +--- a/fs/ntfs3/ntfs_fs.h ++++ b/fs/ntfs3/ntfs_fs.h +@@ -964,9 +964,9 @@ static inline bool run_is_empty(struct runs_tree *run) + } + + /* NTFS uses quad aligned bitmaps. */ +-static inline size_t bitmap_size(size_t bits) ++static inline size_t ntfs3_bitmap_size(size_t bits) + { +- return ALIGN((bits + 7) >> 3, 8); ++ return BITS_TO_U64(bits) * sizeof(u64); + } + + #define _100ns2seconds 10000000 +diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c +index d47cfa215a367..c14b55cdea85c 100644 +--- a/fs/ntfs3/super.c ++++ b/fs/ntfs3/super.c +@@ -1341,7 +1341,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc) + + /* Check bitmap boundary. 
*/ + tt = sbi->used.bitmap.nbits; +- if (inode->i_size < bitmap_size(tt)) { ++ if (inode->i_size < ntfs3_bitmap_size(tt)) { + ntfs_err(sb, "$Bitmap is corrupted."); + err = -EINVAL; + goto put_inode_out; +diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c +index 7a2c9b153be6e..23dbde1de2520 100644 +--- a/fs/quota/dquot.c ++++ b/fs/quota/dquot.c +@@ -995,9 +995,8 @@ struct dquot *dqget(struct super_block *sb, struct kqid qid) + * smp_mb__before_atomic() in dquot_acquire(). + */ + smp_rmb(); +-#ifdef CONFIG_QUOTA_DEBUG +- BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */ +-#endif ++ /* Has somebody invalidated entry under us? */ ++ WARN_ON_ONCE(hlist_unhashed(&dquot->dq_hash)); + out: + if (empty) + do_destroy_dquot(empty); +diff --git a/fs/smb/client/reparse.c b/fs/smb/client/reparse.c +index 689d8a506d459..48c27581ec511 100644 +--- a/fs/smb/client/reparse.c ++++ b/fs/smb/client/reparse.c +@@ -378,6 +378,8 @@ int parse_reparse_point(struct reparse_data_buffer *buf, + u32 plen, struct cifs_sb_info *cifs_sb, + bool unicode, struct cifs_open_info_data *data) + { ++ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); ++ + data->reparse.buf = buf; + + /* See MS-FSCC 2.1.2 */ +@@ -394,12 +396,13 @@ int parse_reparse_point(struct reparse_data_buffer *buf, + case IO_REPARSE_TAG_LX_FIFO: + case IO_REPARSE_TAG_LX_CHR: + case IO_REPARSE_TAG_LX_BLK: +- return 0; ++ break; + default: +- cifs_dbg(VFS, "%s: unhandled reparse tag: 0x%08x\n", +- __func__, le32_to_cpu(buf->ReparseTag)); +- return -EOPNOTSUPP; ++ cifs_tcon_dbg(VFS | ONCE, "unhandled reparse tag: 0x%08x\n", ++ le32_to_cpu(buf->ReparseTag)); ++ break; + } ++ return 0; + } + + int smb2_parse_reparse_point(struct cifs_sb_info *cifs_sb, +diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c +index 09e1e7771592f..7889df8112b4e 100644 +--- a/fs/smb/server/connection.c ++++ b/fs/smb/server/connection.c +@@ -165,11 +165,43 @@ void ksmbd_all_conn_set_status(u64 sess_id, u32 status) + up_read(&conn_list_lock); + } + +-void ksmbd_conn_wait_idle(struct ksmbd_conn *conn, u64 sess_id) ++void ksmbd_conn_wait_idle(struct ksmbd_conn *conn) + { + wait_event(conn->req_running_q, atomic_read(&conn->req_running) < 2); + } + ++int ksmbd_conn_wait_idle_sess_id(struct ksmbd_conn *curr_conn, u64 sess_id) ++{ ++ struct ksmbd_conn *conn; ++ int rc, retry_count = 0, max_timeout = 120; ++ int rcount = 1; ++ ++retry_idle: ++ if (retry_count >= max_timeout) ++ return -EIO; ++ ++ down_read(&conn_list_lock); ++ list_for_each_entry(conn, &conn_list, conns_list) { ++ if (conn->binding || xa_load(&conn->sessions, sess_id)) { ++ if (conn == curr_conn) ++ rcount = 2; ++ if (atomic_read(&conn->req_running) >= rcount) { ++ rc = wait_event_timeout(conn->req_running_q, ++ atomic_read(&conn->req_running) < rcount, ++ HZ); ++ if (!rc) { ++ up_read(&conn_list_lock); ++ retry_count++; ++ goto retry_idle; ++ } ++ } ++ } ++ } ++ up_read(&conn_list_lock); ++ ++ return 0; ++} ++ + int ksmbd_conn_write(struct ksmbd_work *work) + { + struct ksmbd_conn *conn = work->conn; +diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h +index 0e04cf8b1d896..b93e5437793e0 100644 +--- a/fs/smb/server/connection.h ++++ b/fs/smb/server/connection.h +@@ -145,7 +145,8 @@ extern struct list_head conn_list; + extern struct rw_semaphore conn_list_lock; + + bool ksmbd_conn_alive(struct ksmbd_conn *conn); +-void ksmbd_conn_wait_idle(struct ksmbd_conn *conn, u64 sess_id); ++void ksmbd_conn_wait_idle(struct ksmbd_conn *conn); ++int ksmbd_conn_wait_idle_sess_id(struct 
ksmbd_conn *curr_conn, u64 sess_id);
+ struct ksmbd_conn *ksmbd_conn_alloc(void);
+ void ksmbd_conn_free(struct ksmbd_conn *conn);
+ bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c);
+diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c
+index aec0a7a124052..dac5f984f1754 100644
+--- a/fs/smb/server/mgmt/user_session.c
++++ b/fs/smb/server/mgmt/user_session.c
+@@ -310,6 +310,7 @@ void destroy_previous_session(struct ksmbd_conn *conn,
+ {
+ struct ksmbd_session *prev_sess;
+ struct ksmbd_user *prev_user;
++ int err;
+
+ down_write(&sessions_table_lock);
+ down_write(&conn->session_lock);
+@@ -324,8 +325,15 @@ void destroy_previous_session(struct ksmbd_conn *conn,
+ memcmp(user->passkey, prev_user->passkey, user->passkey_sz))
+ goto out;
+
++ ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_RECONNECT);
++ err = ksmbd_conn_wait_idle_sess_id(conn, id);
++ if (err) {
++ ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_NEGOTIATE);
++ goto out;
++ }
+ ksmbd_destroy_file_table(&prev_sess->file_table);
+ prev_sess->state = SMB2_SESSION_EXPIRED;
++ ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_NEGOTIATE);
+ out:
+ up_write(&conn->session_lock);
+ up_write(&sessions_table_lock);
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 592a2cdfd0670..da57adc2bd689 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -2210,7 +2210,7 @@ int smb2_session_logoff(struct ksmbd_work *work)
+ ksmbd_conn_unlock(conn);
+
+ ksmbd_close_session_fds(work);
+- ksmbd_conn_wait_idle(conn, sess_id);
++ ksmbd_conn_wait_idle(conn);
+
+ /*
+ * Re-lookup session to validate if session is deleted
+@@ -4406,7 +4406,8 @@ int smb2_query_dir(struct ksmbd_work *work)
+ rsp->OutputBufferLength = cpu_to_le32(0);
+ rsp->Buffer[0] = 0;
+ rc = ksmbd_iov_pin_rsp(work, (void *)rsp,
+- sizeof(struct smb2_query_directory_rsp));
++ offsetof(struct smb2_query_directory_rsp, Buffer)
++ + 1);
+ if (rc)
+ goto err_out;
+ } else {
+diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
+index f5fbc15e56980..a3c9dd4a1ac33 100644
+--- a/include/acpi/acpixf.h
++++ b/include/acpi/acpixf.h
+@@ -660,12 +660,9 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ void *context))
+ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ acpi_execute_reg_methods(acpi_handle device,
++ u32 max_depth,
+ acpi_adr_space_type
+ space_id))
+-ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+- acpi_execute_orphan_reg_method(acpi_handle device,
+- acpi_adr_space_type
+- space_id))
+ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ acpi_remove_address_space_handler(acpi_handle
+ device,
+diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
+index 03644237e1efb..3c8d2b87a9edc 100644
+--- a/include/linux/bitmap.h
++++ b/include/linux/bitmap.h
+@@ -237,9 +237,11 @@ extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp,
+ #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
+ #define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
+
++#define bitmap_size(nbits) (ALIGN(nbits, BITS_PER_LONG) / BITS_PER_BYTE)
++
+ static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
+ {
+- unsigned int len = BITS_TO_LONGS(nbits) *
sizeof(unsigned long); ++ unsigned int len = bitmap_size(nbits); + + if (small_const_nbits(nbits)) + *dst = ~0UL; +@@ -260,7 +262,7 @@ static inline void bitmap_fill(unsigned long *dst, unsigned int nbits) + static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, + unsigned int nbits) + { +- unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); ++ unsigned int len = bitmap_size(nbits); + + if (small_const_nbits(nbits)) + *dst = *src; +@@ -279,6 +281,18 @@ static inline void bitmap_copy_clear_tail(unsigned long *dst, + dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits); + } + ++static inline void bitmap_copy_and_extend(unsigned long *to, ++ const unsigned long *from, ++ unsigned int count, unsigned int size) ++{ ++ unsigned int copy = BITS_TO_LONGS(count); ++ ++ memcpy(to, from, copy * sizeof(long)); ++ if (count % BITS_PER_LONG) ++ to[copy - 1] &= BITMAP_LAST_WORD_MASK(count); ++ memset(to + copy, 0, bitmap_size(size) - copy * sizeof(long)); ++} ++ + /* + * On 32-bit systems bitmaps are represented as u32 arrays internally. On LE64 + * machines the order of hi and lo parts of numbers match the bitmap structure. +diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h +index b62535fd8de5f..92919d52f7e1b 100644 +--- a/include/linux/bpf_verifier.h ++++ b/include/linux/bpf_verifier.h +@@ -760,8 +760,8 @@ static inline u32 type_flag(u32 type) + /* only use after check_attach_btf_id() */ + static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog) + { +- return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->dst_prog) ? +- prog->aux->dst_prog->type : prog->type; ++ return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->saved_dst_prog_type) ? ++ prog->aux->saved_dst_prog_type : prog->type; + } + + static inline bool bpf_prog_check_recur(const struct bpf_prog *prog) +diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h +index f10fb87d49dbe..dbdbf1451cadd 100644 +--- a/include/linux/cpumask.h ++++ b/include/linux/cpumask.h +@@ -821,7 +821,7 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp) + */ + static inline unsigned int cpumask_size(void) + { +- return BITS_TO_LONGS(large_cpumask_bits) * sizeof(long); ++ return bitmap_size(large_cpumask_bits); + } + + /* +diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h +index f2fc203fb8a1a..3a8a015fdd2ed 100644 +--- a/include/linux/dma-map-ops.h ++++ b/include/linux/dma-map-ops.h +@@ -28,7 +28,7 @@ struct dma_map_ops { + unsigned long attrs); + void (*free)(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle, unsigned long attrs); +- struct page *(*alloc_pages)(struct device *dev, size_t size, ++ struct page *(*alloc_pages_op)(struct device *dev, size_t size, + dma_addr_t *dma_handle, enum dma_data_direction dir, + gfp_t gfp); + void (*free_pages)(struct device *dev, size_t size, struct page *vaddr, +diff --git a/include/linux/dsa/ocelot.h b/include/linux/dsa/ocelot.h +index dca2969015d80..6fbfbde68a37c 100644 +--- a/include/linux/dsa/ocelot.h ++++ b/include/linux/dsa/ocelot.h +@@ -5,6 +5,8 @@ + #ifndef _NET_DSA_TAG_OCELOT_H + #define _NET_DSA_TAG_OCELOT_H + ++#include ++#include + #include + #include + #include +@@ -273,4 +275,49 @@ static inline u32 ocelot_ptp_rew_op(struct sk_buff *skb) + return rew_op; + } + ++/** ++ * ocelot_xmit_get_vlan_info: Determine VLAN_TCI and TAG_TYPE for injected frame ++ * @skb: Pointer to socket buffer ++ * @br: Pointer to bridge device that the port is under, if any ++ * 
@vlan_tci: pointer to the u64 through which the VLAN TCI to inject is returned
++ * @tag_type: pointer to the u64 through which the IFH tag type (C-tag or S-tag) is returned
++ *
++ * If the port is under a VLAN-aware bridge, remove the VLAN header from the
++ * payload and move it into the DSA tag, which will make the switch classify
++ * the packet to the bridge VLAN. Otherwise, leave the classified VLAN at zero,
++ * which is the pvid of standalone ports (OCELOT_STANDALONE_PVID), although not
++ * of VLAN-unaware bridge ports (that would be ocelot_vlan_unaware_pvid()).
++ * Anyway, VID 0 is fine because it is stripped on egress for these port modes,
++ * and source address learning is not performed for packets injected from the
++ * CPU anyway, so it doesn't matter that the VID is "wrong".
++ */
++static inline void ocelot_xmit_get_vlan_info(struct sk_buff *skb,
++ struct net_device *br,
++ u64 *vlan_tci, u64 *tag_type)
++{
++ struct vlan_ethhdr *hdr;
++ u16 proto, tci;
++
++ if (!br || !br_vlan_enabled(br)) {
++ *vlan_tci = 0;
++ *tag_type = IFH_TAG_TYPE_C;
++ return;
++ }
++
++ hdr = (struct vlan_ethhdr *)skb_mac_header(skb);
++ br_vlan_get_proto(br, &proto);
++
++ if (ntohs(hdr->h_vlan_proto) == proto) {
++ vlan_remove_tag(skb, &tci);
++ *vlan_tci = tci;
++ } else {
++ rcu_read_lock();
++ br_vlan_get_pvid_rcu(br, &tci);
++ rcu_read_unlock();
++ *vlan_tci = tci;
++ }
++
++ *tag_type = (proto != ETH_P_8021Q) ? IFH_TAG_TYPE_S : IFH_TAG_TYPE_C;
++}
++
+ #endif
+diff --git a/include/linux/evm.h b/include/linux/evm.h
+index 01fc495a83e27..36ec884320d9f 100644
+--- a/include/linux/evm.h
++++ b/include/linux/evm.h
+@@ -31,6 +31,7 @@ extern void evm_inode_post_setxattr(struct dentry *dentry,
+ const char *xattr_name,
+ const void *xattr_value,
+ size_t xattr_value_len);
++extern int evm_inode_copy_up_xattr(const char *name);
+ extern int evm_inode_removexattr(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *xattr_name);
+ extern void evm_inode_post_removexattr(struct dentry *dentry,
+@@ -117,6 +118,11 @@ static inline void evm_inode_post_setxattr(struct dentry *dentry,
+ return;
+ }
+
++static inline int evm_inode_copy_up_xattr(const char *name)
++{
++ return 0;
++}
++
+ static inline int evm_inode_removexattr(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ const char *xattr_name)
+diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
+index 3b04657787d09..1352a24d72ef4 100644
+--- a/include/linux/f2fs_fs.h
++++ b/include/linux/f2fs_fs.h
+@@ -76,6 +76,7 @@ enum stop_cp_reason {
+ STOP_CP_REASON_CORRUPTED_SUMMARY,
+ STOP_CP_REASON_UPDATE_INODE,
+ STOP_CP_REASON_FLUSH_FAIL,
++ STOP_CP_REASON_NO_SEGMENT,
+ STOP_CP_REASON_MAX,
+ };
+
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 43e640fb4a7f7..2a554e4045a9a 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2265,6 +2265,9 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
+ *
+ * I_PINNING_FSCACHE_WB Inode is pinning an fscache object for writeback.
+ *
++ * I_LRU_ISOLATING Inode is pinned while being isolated from the LRU
++ * without holding i_count.
++ *
+ * Q: What is the difference between I_WILL_FREE and I_FREEING?
+ */ + #define I_DIRTY_SYNC (1 << 0) +@@ -2288,6 +2291,8 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src, + #define I_DONTCACHE (1 << 16) + #define I_SYNC_QUEUED (1 << 17) + #define I_PINNING_FSCACHE_WB (1 << 18) ++#define __I_LRU_ISOLATING 19 ++#define I_LRU_ISOLATING (1 << __I_LRU_ISOLATING) + + #define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC) + #define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES) +diff --git a/include/linux/slab.h b/include/linux/slab.h +index 5a67dab27f831..a761cc3559f29 100644 +--- a/include/linux/slab.h ++++ b/include/linux/slab.h +@@ -245,8 +245,9 @@ DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T)) + size_t ksize(const void *objp); + + #ifdef CONFIG_PRINTK +-bool kmem_valid_obj(void *object); +-void kmem_dump_obj(void *object); ++bool kmem_dump_obj(void *object); ++#else ++static inline bool kmem_dump_obj(void *object) { return false; } + #endif + + /* +diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h +index dc3cb16835b63..f8b09a82f62e1 100644 +--- a/include/net/af_vsock.h ++++ b/include/net/af_vsock.h +@@ -227,8 +227,12 @@ struct vsock_tap { + int vsock_add_tap(struct vsock_tap *vt); + int vsock_remove_tap(struct vsock_tap *vt); + void vsock_deliver_tap(struct sk_buff *build_skb(void *opaque), void *opaque); ++int __vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, ++ int flags); + int vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + int flags); ++int __vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg, ++ size_t len, int flags); + int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg, + size_t len, int flags); + +diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h +index 4a8e578405cb3..9365e5af8d6da 100644 +--- a/include/net/inet_timewait_sock.h ++++ b/include/net/inet_timewait_sock.h +@@ -114,7 +114,7 @@ static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo + + void inet_twsk_deschedule_put(struct inet_timewait_sock *tw); + +-void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family); ++void inet_twsk_purge(struct inet_hashinfo *hashinfo); + + static inline + struct net *twsk_net(const struct inet_timewait_sock *twsk) +diff --git a/include/net/kcm.h b/include/net/kcm.h +index 90279e5e09a5c..441e993be634c 100644 +--- a/include/net/kcm.h ++++ b/include/net/kcm.h +@@ -70,6 +70,7 @@ struct kcm_sock { + struct work_struct tx_work; + struct list_head wait_psock_list; + struct sk_buff *seq_skb; ++ struct mutex tx_mutex; + u32 tx_stopped : 1; + + /* Don't use bit fields here, these are set under different locks */ +diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h +index cd526fd31b458..37ccce56a73e8 100644 +--- a/include/net/mana/mana.h ++++ b/include/net/mana/mana.h +@@ -274,6 +274,7 @@ struct mana_cq { + /* NAPI data */ + struct napi_struct napi; + int work_done; ++ int work_done_since_doorbell; + int budget; + }; + +diff --git a/include/net/tcp.h b/include/net/tcp.h +index 71af244104433..cc3b56bf19e05 100644 +--- a/include/net/tcp.h ++++ b/include/net/tcp.h +@@ -344,7 +344,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb); + void tcp_rcv_space_adjust(struct sock *sk); + int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp); + void tcp_twsk_destructor(struct sock *sk); +-void tcp_twsk_purge(struct list_head *net_exit_list, int family); ++void tcp_twsk_purge(struct list_head *net_exit_list); + ssize_t tcp_splice_read(struct 
socket *sk, loff_t *ppos, + struct pipe_inode_info *pipe, size_t len, + unsigned int flags); +diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h +index 526def14e7fb7..6ae00983a6126 100644 +--- a/include/scsi/scsi_cmnd.h ++++ b/include/scsi/scsi_cmnd.h +@@ -234,7 +234,7 @@ static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd) + + static inline unsigned int scsi_logical_block_count(struct scsi_cmnd *scmd) + { +- unsigned int shift = ilog2(scmd->device->sector_size) - SECTOR_SHIFT; ++ unsigned int shift = ilog2(scmd->device->sector_size); + + return blk_rq_bytes(scsi_cmd_to_rq(scmd)) >> shift; + } +diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h +index 1e1b40f4e664e..846132ca5503d 100644 +--- a/include/soc/mscc/ocelot.h ++++ b/include/soc/mscc/ocelot.h +@@ -813,6 +813,9 @@ struct ocelot { + const u32 *const *map; + struct list_head stats_regions; + ++ spinlock_t inj_lock; ++ spinlock_t xtr_lock; ++ + u32 pool_size[OCELOT_SB_NUM][OCELOT_SB_POOL_NUM]; + int packet_buffer_size; + int num_frame_refs; +@@ -966,10 +969,17 @@ void __ocelot_target_write_ix(struct ocelot *ocelot, enum ocelot_target target, + u32 val, u32 reg, u32 offset); + + /* Packet I/O */ ++void ocelot_lock_inj_grp(struct ocelot *ocelot, int grp); ++void ocelot_unlock_inj_grp(struct ocelot *ocelot, int grp); ++void ocelot_lock_xtr_grp(struct ocelot *ocelot, int grp); ++void ocelot_unlock_xtr_grp(struct ocelot *ocelot, int grp); ++void ocelot_lock_xtr_grp_bh(struct ocelot *ocelot, int grp); ++void ocelot_unlock_xtr_grp_bh(struct ocelot *ocelot, int grp); + bool ocelot_can_inject(struct ocelot *ocelot, int grp); + void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp, + u32 rew_op, struct sk_buff *skb); +-void ocelot_ifh_port_set(void *ifh, int port, u32 rew_op, u32 vlan_tag); ++void ocelot_ifh_set_basic(void *ifh, struct ocelot *ocelot, int port, ++ u32 rew_op, struct sk_buff *skb); + int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **skb); + void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp); + void ocelot_ptp_rx_timestamp(struct ocelot *ocelot, struct sk_buff *skb, +diff --git a/include/uapi/misc/fastrpc.h b/include/uapi/misc/fastrpc.h +index 91583690bddc5..f33d914d8f469 100644 +--- a/include/uapi/misc/fastrpc.h ++++ b/include/uapi/misc/fastrpc.h +@@ -8,14 +8,11 @@ + #define FASTRPC_IOCTL_ALLOC_DMA_BUFF _IOWR('R', 1, struct fastrpc_alloc_dma_buf) + #define FASTRPC_IOCTL_FREE_DMA_BUFF _IOWR('R', 2, __u32) + #define FASTRPC_IOCTL_INVOKE _IOWR('R', 3, struct fastrpc_invoke) +-/* This ioctl is only supported with secure device nodes */ + #define FASTRPC_IOCTL_INIT_ATTACH _IO('R', 4) + #define FASTRPC_IOCTL_INIT_CREATE _IOWR('R', 5, struct fastrpc_init_create) + #define FASTRPC_IOCTL_MMAP _IOWR('R', 6, struct fastrpc_req_mmap) + #define FASTRPC_IOCTL_MUNMAP _IOWR('R', 7, struct fastrpc_req_munmap) +-/* This ioctl is only supported with secure device nodes */ + #define FASTRPC_IOCTL_INIT_ATTACH_SNS _IO('R', 8) +-/* This ioctl is only supported with secure device nodes */ + #define FASTRPC_IOCTL_INIT_CREATE_STATIC _IOWR('R', 9, struct fastrpc_init_create_static) + #define FASTRPC_IOCTL_MEM_MAP _IOWR('R', 10, struct fastrpc_mem_map) + #define FASTRPC_IOCTL_MEM_UNMAP _IOWR('R', 11, struct fastrpc_mem_unmap) +diff --git a/init/Kconfig b/init/Kconfig +index e173364abd6c0..6054ba684c539 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -1916,12 +1916,15 @@ config RUST + config RUSTC_VERSION_TEXT + string + depends on RUST +- default $(shell,command -v 
$(RUSTC) >/dev/null 2>&1 && $(RUSTC) --version || echo n) ++ default "$(shell,$(RUSTC) --version 2>/dev/null)" + + config BINDGEN_VERSION_TEXT + string + depends on RUST +- default $(shell,command -v $(BINDGEN) >/dev/null 2>&1 && $(BINDGEN) --version || echo n) ++ # The dummy parameter `workaround-for-0.69.0` is required to support 0.69.0 ++ # (https://github.com/rust-lang/rust-bindgen/pull/2678). It can be removed when ++ # the minimum version is upgraded past that (0.69.1 already fixed the issue). ++ default "$(shell,$(BINDGEN) --version workaround-for-0.69.0 2>/dev/null)" + + # + # Place an empty function call at each tracepoint site. Can be +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c +index 171045b6956d9..3f1a9cd7fc9ec 100644 +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -16124,8 +16124,9 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old, + spi = i / BPF_REG_SIZE; + + if (exact && +- old->stack[spi].slot_type[i % BPF_REG_SIZE] != +- cur->stack[spi].slot_type[i % BPF_REG_SIZE]) ++ (i >= cur->allocated_stack || ++ old->stack[spi].slot_type[i % BPF_REG_SIZE] != ++ cur->stack[spi].slot_type[i % BPF_REG_SIZE])) + return false; + + if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ) && !exact) { +diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c +index d872fff901073..5eca6281d1aa6 100644 +--- a/kernel/cgroup/cgroup.c ++++ b/kernel/cgroup/cgroup.c +@@ -1728,13 +1728,13 @@ static int css_populate_dir(struct cgroup_subsys_state *css) + + if (!css->ss) { + if (cgroup_on_dfl(cgrp)) { +- ret = cgroup_addrm_files(&cgrp->self, cgrp, ++ ret = cgroup_addrm_files(css, cgrp, + cgroup_base_files, true); + if (ret < 0) + return ret; + + if (cgroup_psi_enabled()) { +- ret = cgroup_addrm_files(&cgrp->self, cgrp, ++ ret = cgroup_addrm_files(css, cgrp, + cgroup_psi_files, true); + if (ret < 0) + return ret; +diff --git a/kernel/cpu.c b/kernel/cpu.c +index 874bfb952e6e8..0c72b94ed076a 100644 +--- a/kernel/cpu.c ++++ b/kernel/cpu.c +@@ -2728,6 +2728,16 @@ int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) + return ret; + } + ++/** ++ * Check if the core a CPU belongs to is online ++ */ ++#if !defined(topology_is_core_online) ++static inline bool topology_is_core_online(unsigned int cpu) ++{ ++ return true; ++} ++#endif ++ + int cpuhp_smt_enable(void) + { + int cpu, ret = 0; +@@ -2738,7 +2748,7 @@ int cpuhp_smt_enable(void) + /* Skip online CPUs and CPUs on offline nodes */ + if (cpu_online(cpu) || !node_online(cpu_to_node(cpu))) + continue; +- if (!cpu_smt_thread_allowed(cpu)) ++ if (!cpu_smt_thread_allowed(cpu) || !topology_is_core_online(cpu)) + continue; + ret = _cpu_up(cpu, 0, CPUHP_ONLINE); + if (ret) +diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c +index f1d9f01b283d7..2923f3b2dd2c7 100644 +--- a/kernel/dma/mapping.c ++++ b/kernel/dma/mapping.c +@@ -570,9 +570,9 @@ static struct page *__dma_alloc_pages(struct device *dev, size_t size, + size = PAGE_ALIGN(size); + if (dma_alloc_direct(dev, ops)) + return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp); +- if (!ops->alloc_pages) ++ if (!ops->alloc_pages_op) + return NULL; +- return ops->alloc_pages(dev, size, dma_handle, dir, gfp); ++ return ops->alloc_pages_op(dev, size, dma_handle, dir, gfp); + } + + struct page *dma_alloc_pages(struct device *dev, size_t size, +diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h +index 3d1851f82dbb6..de0afabfbd440 100644 +--- a/kernel/rcu/rcu.h ++++ b/kernel/rcu/rcu.h +@@ -10,6 +10,7 @@ + #ifndef __LINUX_RCU_H + #define 
__LINUX_RCU_H + ++#include + #include + + /* +@@ -248,6 +249,12 @@ static inline void debug_rcu_head_unqueue(struct rcu_head *head) + } + #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ + ++static inline void debug_rcu_head_callback(struct rcu_head *rhp) ++{ ++ if (unlikely(!rhp->func)) ++ kmem_dump_obj(rhp); ++} ++ + extern int rcu_cpu_stall_suppress_at_boot; + + static inline bool rcu_stall_is_suppressed_at_boot(void) +diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c +index 336af24e0fe35..c38e5933a5d69 100644 +--- a/kernel/rcu/srcutiny.c ++++ b/kernel/rcu/srcutiny.c +@@ -138,6 +138,7 @@ void srcu_drive_gp(struct work_struct *wp) + while (lh) { + rhp = lh; + lh = lh->next; ++ debug_rcu_head_callback(rhp); + local_bh_disable(); + rhp->func(rhp); + local_bh_enable(); +diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c +index 25285893e44e7..2f770a9a2a13a 100644 +--- a/kernel/rcu/srcutree.c ++++ b/kernel/rcu/srcutree.c +@@ -1735,6 +1735,7 @@ static void srcu_invoke_callbacks(struct work_struct *work) + rhp = rcu_cblist_dequeue(&ready_cbs); + for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) { + debug_rcu_head_unqueue(rhp); ++ debug_rcu_head_callback(rhp); + local_bh_disable(); + rhp->func(rhp); + local_bh_enable(); +diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h +index ff8d539ee22be..df81506cf2bde 100644 +--- a/kernel/rcu/tasks.h ++++ b/kernel/rcu/tasks.h +@@ -538,6 +538,7 @@ static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu + raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); + len = rcl.len; + for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) { ++ debug_rcu_head_callback(rhp); + local_bh_disable(); + rhp->func(rhp); + local_bh_enable(); +diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c +index 42f7589e51e09..fec804b790803 100644 +--- a/kernel/rcu/tiny.c ++++ b/kernel/rcu/tiny.c +@@ -97,6 +97,7 @@ static inline bool rcu_reclaim_tiny(struct rcu_head *head) + + trace_rcu_invoke_callback("", head); + f = head->func; ++ debug_rcu_head_callback(head); + WRITE_ONCE(head->func, (rcu_callback_t)0L); + f(head); + rcu_lock_release(&rcu_callback_map); +diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c +index 583cc29080764..8541084eeba50 100644 +--- a/kernel/rcu/tree.c ++++ b/kernel/rcu/tree.c +@@ -1298,7 +1298,7 @@ EXPORT_SYMBOL_GPL(rcu_gp_slow_register); + /* Unregister a counter, with NULL for not caring which. 
*/ + void rcu_gp_slow_unregister(atomic_t *rgssp) + { +- WARN_ON_ONCE(rgssp && rgssp != rcu_gp_slow_suppress); ++ WARN_ON_ONCE(rgssp && rgssp != rcu_gp_slow_suppress && rcu_gp_slow_suppress != NULL); + + WRITE_ONCE(rcu_gp_slow_suppress, NULL); + } +@@ -2185,6 +2185,7 @@ static void rcu_do_batch(struct rcu_data *rdp) + trace_rcu_invoke_callback(rcu_state.name, rhp); + + f = rhp->func; ++ debug_rcu_head_callback(rhp); + WRITE_ONCE(rhp->func, (rcu_callback_t)0L); + f(rhp); + +diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c +index 8c1e183329d97..3a13cecf17740 100644 +--- a/kernel/sched/topology.c ++++ b/kernel/sched/topology.c +@@ -2126,6 +2126,9 @@ int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node) + struct cpumask ***hop_masks; + int hop, ret = nr_cpu_ids; + ++ if (node == NUMA_NO_NODE) ++ return cpumask_nth_and(cpu, cpus, cpu_online_mask); ++ + rcu_read_lock(); + + /* CPU-less node entries are uninitialized in sched_domains_numa_masks */ +diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c +index 3260bbe98894b..aa864999dc21b 100644 +--- a/kernel/time/clocksource.c ++++ b/kernel/time/clocksource.c +@@ -20,6 +20,16 @@ + #include "tick-internal.h" + #include "timekeeping_internal.h" + ++static noinline u64 cycles_to_nsec_safe(struct clocksource *cs, u64 start, u64 end) ++{ ++ u64 delta = clocksource_delta(end, start, cs->mask); ++ ++ if (likely(delta < cs->max_cycles)) ++ return clocksource_cyc2ns(delta, cs->mult, cs->shift); ++ ++ return mul_u64_u32_shr(delta, cs->mult, cs->shift); ++} ++ + /** + * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks + * @mult: pointer to mult variable +@@ -222,8 +232,8 @@ enum wd_read_status { + static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow) + { + unsigned int nretries, max_retries; +- u64 wd_end, wd_end2, wd_delta; + int64_t wd_delay, wd_seq_delay; ++ u64 wd_end, wd_end2; + + max_retries = clocksource_get_max_watchdog_retry(); + for (nretries = 0; nretries <= max_retries; nretries++) { +@@ -234,9 +244,7 @@ static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow, + wd_end2 = watchdog->read(watchdog); + local_irq_enable(); + +- wd_delta = clocksource_delta(wd_end, *wdnow, watchdog->mask); +- wd_delay = clocksource_cyc2ns(wd_delta, watchdog->mult, +- watchdog->shift); ++ wd_delay = cycles_to_nsec_safe(watchdog, *wdnow, wd_end); + if (wd_delay <= WATCHDOG_MAX_SKEW) { + if (nretries > 1 && nretries >= max_retries) { + pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n", +@@ -254,8 +262,7 @@ static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow, + * report system busy, reinit the watchdog and skip the current + * watchdog test. 
+ */ +- wd_delta = clocksource_delta(wd_end2, wd_end, watchdog->mask); +- wd_seq_delay = clocksource_cyc2ns(wd_delta, watchdog->mult, watchdog->shift); ++ wd_seq_delay = cycles_to_nsec_safe(watchdog, wd_end, wd_end2); + if (wd_seq_delay > WATCHDOG_MAX_SKEW/2) + goto skip_test; + } +@@ -366,8 +373,7 @@ void clocksource_verify_percpu(struct clocksource *cs) + delta = (csnow_end - csnow_mid) & cs->mask; + if (delta < 0) + cpumask_set_cpu(cpu, &cpus_ahead); +- delta = clocksource_delta(csnow_end, csnow_begin, cs->mask); +- cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift); ++ cs_nsec = cycles_to_nsec_safe(cs, csnow_begin, csnow_end); + if (cs_nsec > cs_nsec_max) + cs_nsec_max = cs_nsec; + if (cs_nsec < cs_nsec_min) +@@ -398,8 +404,8 @@ static inline void clocksource_reset_watchdog(void) + + static void clocksource_watchdog(struct timer_list *unused) + { +- u64 csnow, wdnow, cslast, wdlast, delta; + int64_t wd_nsec, cs_nsec, interval; ++ u64 csnow, wdnow, cslast, wdlast; + int next_cpu, reset_pending; + struct clocksource *cs; + enum wd_read_status read_ret; +@@ -456,12 +462,8 @@ static void clocksource_watchdog(struct timer_list *unused) + continue; + } + +- delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask); +- wd_nsec = clocksource_cyc2ns(delta, watchdog->mult, +- watchdog->shift); +- +- delta = clocksource_delta(csnow, cs->cs_last, cs->mask); +- cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift); ++ wd_nsec = cycles_to_nsec_safe(watchdog, cs->wd_last, wdnow); ++ cs_nsec = cycles_to_nsec_safe(cs, cs->cs_last, csnow); + wdlast = cs->wd_last; /* save these in case we print them */ + cslast = cs->cs_last; + cs->cs_last = csnow; +@@ -832,7 +834,7 @@ void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles) + */ + u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now) + { +- u64 now, delta, nsec = 0; ++ u64 now, nsec = 0; + + if (!suspend_clocksource) + return 0; +@@ -847,12 +849,8 @@ u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now) + else + now = suspend_clocksource->read(suspend_clocksource); + +- if (now > suspend_start) { +- delta = clocksource_delta(now, suspend_start, +- suspend_clocksource->mask); +- nsec = mul_u64_u32_shr(delta, suspend_clocksource->mult, +- suspend_clocksource->shift); +- } ++ if (now > suspend_start) ++ nsec = cycles_to_nsec_safe(suspend_clocksource, suspend_start, now); + + /* + * Disable the suspend timer to save power if current clocksource is +diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c +index edb0f821dceaa..57e5cb36f1bc9 100644 +--- a/kernel/time/hrtimer.c ++++ b/kernel/time/hrtimer.c +@@ -38,6 +38,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -1287,6 +1288,8 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, + struct hrtimer_clock_base *base; + unsigned long flags; + ++ if (WARN_ON_ONCE(!timer->function)) ++ return; + /* + * Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft + * match on CONFIG_PREEMPT_RT = n. 
With PREEMPT_RT check the hard +@@ -2223,8 +2226,8 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base, + + int hrtimers_cpu_dying(unsigned int dying_cpu) + { ++ int i, ncpu = cpumask_any_and(cpu_active_mask, housekeeping_cpumask(HK_TYPE_TIMER)); + struct hrtimer_cpu_base *old_base, *new_base; +- int i, ncpu = cpumask_first(cpu_active_mask); + + tick_cancel_sched_timer(dying_cpu); + +diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h +index 5ed5a9d41d5a7..03a586e12cf89 100644 +--- a/kernel/time/tick-sched.h ++++ b/kernel/time/tick-sched.h +@@ -61,7 +61,6 @@ struct tick_sched { + unsigned int tick_stopped : 1; + unsigned int idle_active : 1; + unsigned int do_timer_last : 1; +- unsigned int got_idle_tick : 1; + + /* Tick handling: jiffies stall check */ + unsigned int stalled_jiffies; +@@ -73,6 +72,7 @@ struct tick_sched { + ktime_t next_tick; + unsigned long idle_jiffies; + ktime_t idle_waketime; ++ unsigned int got_idle_tick; + + /* Idle entry */ + seqcount_t idle_sleeptime_seq; +diff --git a/lib/cpumask.c b/lib/cpumask.c +index a7fd02b5ae264..34335c1e72653 100644 +--- a/lib/cpumask.c ++++ b/lib/cpumask.c +@@ -146,9 +146,7 @@ unsigned int cpumask_local_spread(unsigned int i, int node) + /* Wrap: we always want a cpu. */ + i %= num_online_cpus(); + +- cpu = (node == NUMA_NO_NODE) ? +- cpumask_nth(i, cpu_online_mask) : +- sched_numa_find_nth_cpu(cpu_online_mask, i, node); ++ cpu = sched_numa_find_nth_cpu(cpu_online_mask, i, node); + + WARN_ON(cpu >= nr_cpu_ids); + return cpu; +diff --git a/lib/math/prime_numbers.c b/lib/math/prime_numbers.c +index d42cebf7407fc..d3b64b10da1c5 100644 +--- a/lib/math/prime_numbers.c ++++ b/lib/math/prime_numbers.c +@@ -6,8 +6,6 @@ + #include + #include + +-#define bitmap_size(nbits) (BITS_TO_LONGS(nbits) * sizeof(unsigned long)) +- + struct primes { + struct rcu_head rcu; + unsigned long last, sz; +diff --git a/mm/huge_memory.c b/mm/huge_memory.c +index f2816c9a1f3ec..9aea11b1477c8 100644 +--- a/mm/huge_memory.c ++++ b/mm/huge_memory.c +@@ -1504,7 +1504,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) + vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); + if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) { + spin_unlock(vmf->ptl); +- goto out; ++ return 0; + } + + pmd = pmd_modify(oldpmd, vma->vm_page_prot); +@@ -1548,23 +1548,16 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) + if (migrated) { + flags |= TNF_MIGRATED; + page_nid = target_nid; +- } else { +- flags |= TNF_MIGRATE_FAIL; +- vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); +- if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) { +- spin_unlock(vmf->ptl); +- goto out; +- } +- goto out_map; ++ task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags); ++ return 0; + } + +-out: +- if (page_nid != NUMA_NO_NODE) +- task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, +- flags); +- +- return 0; +- ++ flags |= TNF_MIGRATE_FAIL; ++ vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); ++ if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) { ++ spin_unlock(vmf->ptl); ++ return 0; ++ } + out_map: + /* Restore the PMD */ + pmd = pmd_modify(oldpmd, vma->vm_page_prot); +@@ -1574,7 +1567,10 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) + set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); + update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); + spin_unlock(vmf->ptl); +- goto out; ++ ++ if (page_nid != NUMA_NO_NODE) ++ task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags); ++ return 0; + } + + /* +diff --git a/mm/memcontrol.c b/mm/memcontrol.c +index fd1b707f5de40..110afda740a18 100644 +--- 
a/mm/memcontrol.c ++++ b/mm/memcontrol.c +@@ -4880,9 +4880,12 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of, + buf = endp + 1; + + cfd = simple_strtoul(buf, &endp, 10); +- if ((*endp != ' ') && (*endp != '\0')) ++ if (*endp == '\0') ++ buf = endp; ++ else if (*endp == ' ') ++ buf = endp + 1; ++ else + return -EINVAL; +- buf = endp + 1; + + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (!event) +diff --git a/mm/memory-failure.c b/mm/memory-failure.c +index f21b4fb1e84ca..9018a1162efc9 100644 +--- a/mm/memory-failure.c ++++ b/mm/memory-failure.c +@@ -2395,7 +2395,7 @@ struct memory_failure_entry { + struct memory_failure_cpu { + DECLARE_KFIFO(fifo, struct memory_failure_entry, + MEMORY_FAILURE_FIFO_SIZE); +- spinlock_t lock; ++ raw_spinlock_t lock; + struct work_struct work; + }; + +@@ -2421,20 +2421,22 @@ void memory_failure_queue(unsigned long pfn, int flags) + { + struct memory_failure_cpu *mf_cpu; + unsigned long proc_flags; ++ bool buffer_overflow; + struct memory_failure_entry entry = { + .pfn = pfn, + .flags = flags, + }; + + mf_cpu = &get_cpu_var(memory_failure_cpu); +- spin_lock_irqsave(&mf_cpu->lock, proc_flags); +- if (kfifo_put(&mf_cpu->fifo, entry)) ++ raw_spin_lock_irqsave(&mf_cpu->lock, proc_flags); ++ buffer_overflow = !kfifo_put(&mf_cpu->fifo, entry); ++ if (!buffer_overflow) + schedule_work_on(smp_processor_id(), &mf_cpu->work); +- else ++ raw_spin_unlock_irqrestore(&mf_cpu->lock, proc_flags); ++ put_cpu_var(memory_failure_cpu); ++ if (buffer_overflow) + pr_err("buffer overflow when queuing memory failure at %#lx\n", + pfn); +- spin_unlock_irqrestore(&mf_cpu->lock, proc_flags); +- put_cpu_var(memory_failure_cpu); + } + EXPORT_SYMBOL_GPL(memory_failure_queue); + +@@ -2447,9 +2449,9 @@ static void memory_failure_work_func(struct work_struct *work) + + mf_cpu = container_of(work, struct memory_failure_cpu, work); + for (;;) { +- spin_lock_irqsave(&mf_cpu->lock, proc_flags); ++ raw_spin_lock_irqsave(&mf_cpu->lock, proc_flags); + gotten = kfifo_get(&mf_cpu->fifo, &entry); +- spin_unlock_irqrestore(&mf_cpu->lock, proc_flags); ++ raw_spin_unlock_irqrestore(&mf_cpu->lock, proc_flags); + if (!gotten) + break; + if (entry.flags & MF_SOFT_OFFLINE) +@@ -2479,7 +2481,7 @@ static int __init memory_failure_init(void) + + for_each_possible_cpu(cpu) { + mf_cpu = &per_cpu(memory_failure_cpu, cpu); +- spin_lock_init(&mf_cpu->lock); ++ raw_spin_lock_init(&mf_cpu->lock); + INIT_KFIFO(mf_cpu->fifo); + INIT_WORK(&mf_cpu->work, memory_failure_work_func); + } +diff --git a/mm/memory.c b/mm/memory.c +index 58408bf96e0eb..bfd2273cb4b46 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -4775,7 +4775,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) + spin_lock(vmf->ptl); + if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { + pte_unmap_unlock(vmf->pte, vmf->ptl); +- goto out; ++ return 0; + } + + /* Get the normal PTE */ +@@ -4840,23 +4840,19 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) + if (migrate_misplaced_page(page, vma, target_nid)) { + page_nid = target_nid; + flags |= TNF_MIGRATED; +- } else { +- flags |= TNF_MIGRATE_FAIL; +- vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, +- vmf->address, &vmf->ptl); +- if (unlikely(!vmf->pte)) +- goto out; +- if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { +- pte_unmap_unlock(vmf->pte, vmf->ptl); +- goto out; +- } +- goto out_map; ++ task_numa_fault(last_cpupid, page_nid, 1, flags); ++ return 0; + } + +-out: +- if (page_nid != NUMA_NO_NODE) +- task_numa_fault(last_cpupid, page_nid, 
1, flags); +- return 0; ++ flags |= TNF_MIGRATE_FAIL; ++ vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, ++ vmf->address, &vmf->ptl); ++ if (unlikely(!vmf->pte)) ++ return 0; ++ if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { ++ pte_unmap_unlock(vmf->pte, vmf->ptl); ++ return 0; ++ } + out_map: + /* + * Make it present again, depending on how arch implements +@@ -4870,7 +4866,10 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) + ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte); + update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); + pte_unmap_unlock(vmf->pte, vmf->ptl); +- goto out; ++ ++ if (page_nid != NUMA_NO_NODE) ++ task_numa_fault(last_cpupid, page_nid, 1, flags); ++ return 0; + } + + static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf) +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index 39bdbfb5313fb..edb32635037f4 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -302,7 +302,7 @@ EXPORT_SYMBOL(nr_online_nodes); + + static bool page_contains_unaccepted(struct page *page, unsigned int order); + static void accept_page(struct page *page, unsigned int order); +-static bool try_to_accept_memory(struct zone *zone, unsigned int order); ++static bool cond_accept_memory(struct zone *zone, unsigned int order); + static inline bool has_unaccepted_memory(void); + static bool __free_unaccepted(struct page *page); + +@@ -2830,9 +2830,6 @@ static inline long __zone_watermark_unusable_free(struct zone *z, + if (!(alloc_flags & ALLOC_CMA)) + unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES); + #endif +-#ifdef CONFIG_UNACCEPTED_MEMORY +- unusable_free += zone_page_state(z, NR_UNACCEPTED); +-#endif + + return unusable_free; + } +@@ -3126,16 +3123,16 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, + } + } + ++ cond_accept_memory(zone, order); ++ + mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); + if (!zone_watermark_fast(zone, order, mark, + ac->highest_zoneidx, alloc_flags, + gfp_mask)) { + int ret; + +- if (has_unaccepted_memory()) { +- if (try_to_accept_memory(zone, order)) +- goto try_this_zone; +- } ++ if (cond_accept_memory(zone, order)) ++ goto try_this_zone; + + #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT + /* +@@ -3189,10 +3186,8 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, + + return page; + } else { +- if (has_unaccepted_memory()) { +- if (try_to_accept_memory(zone, order)) +- goto try_this_zone; +- } ++ if (cond_accept_memory(zone, order)) ++ goto try_this_zone; + + #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT + /* Try again if zone has deferred pages */ +@@ -6619,9 +6614,6 @@ static bool try_to_accept_memory_one(struct zone *zone) + struct page *page; + bool last; + +- if (list_empty(&zone->unaccepted_pages)) +- return false; +- + spin_lock_irqsave(&zone->lock, flags); + page = list_first_entry_or_null(&zone->unaccepted_pages, + struct page, lru); +@@ -6647,23 +6639,29 @@ static bool try_to_accept_memory_one(struct zone *zone) + return true; + } + +-static bool try_to_accept_memory(struct zone *zone, unsigned int order) ++static bool cond_accept_memory(struct zone *zone, unsigned int order) + { + long to_accept; +- int ret = false; ++ bool ret = false; ++ ++ if (!has_unaccepted_memory()) ++ return false; ++ ++ if (list_empty(&zone->unaccepted_pages)) ++ return false; + + /* How much to accept to get to high watermark? 
*/ + to_accept = high_wmark_pages(zone) - + (zone_page_state(zone, NR_FREE_PAGES) - +- __zone_watermark_unusable_free(zone, order, 0)); ++ __zone_watermark_unusable_free(zone, order, 0) - ++ zone_page_state(zone, NR_UNACCEPTED)); + +- /* Accept at least one page */ +- do { ++ while (to_accept > 0) { + if (!try_to_accept_memory_one(zone)) + break; + ret = true; + to_accept -= MAX_ORDER_NR_PAGES; +- } while (to_accept > 0); ++ } + + return ret; + } +@@ -6706,7 +6704,7 @@ static void accept_page(struct page *page, unsigned int order) + { + } + +-static bool try_to_accept_memory(struct zone *zone, unsigned int order) ++static bool cond_accept_memory(struct zone *zone, unsigned int order) + { + return false; + } +diff --git a/mm/slab_common.c b/mm/slab_common.c +index 9bbffe82d65af..8d431193c2736 100644 +--- a/mm/slab_common.c ++++ b/mm/slab_common.c +@@ -528,26 +528,6 @@ bool slab_is_available(void) + } + + #ifdef CONFIG_PRINTK +-/** +- * kmem_valid_obj - does the pointer reference a valid slab object? +- * @object: pointer to query. +- * +- * Return: %true if the pointer is to a not-yet-freed object from +- * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer +- * is to an already-freed object, and %false otherwise. +- */ +-bool kmem_valid_obj(void *object) +-{ +- struct folio *folio; +- +- /* Some arches consider ZERO_SIZE_PTR to be a valid address. */ +- if (object < (void *)PAGE_SIZE || !virt_addr_valid(object)) +- return false; +- folio = virt_to_folio(object); +- return folio_test_slab(folio); +-} +-EXPORT_SYMBOL_GPL(kmem_valid_obj); +- + static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) + { + if (__kfence_obj_info(kpp, object, slab)) +@@ -566,11 +546,11 @@ static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab * + * and, if available, the slab name, return address, and stack trace from + * the allocation and last free path of that object. + * +- * This function will splat if passed a pointer to a non-slab object. +- * If you are not sure what type of object you have, you should instead +- * use mem_dump_obj(). ++ * Return: %true if the pointer is to a not-yet-freed object from ++ * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer ++ * is to an already-freed object, and %false otherwise. + */ +-void kmem_dump_obj(void *object) ++bool kmem_dump_obj(void *object) + { + char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc"; + int i; +@@ -578,13 +558,13 @@ void kmem_dump_obj(void *object) + unsigned long ptroffset; + struct kmem_obj_info kp = { }; + +- if (WARN_ON_ONCE(!virt_addr_valid(object))) +- return; ++ /* Some arches consider ZERO_SIZE_PTR to be a valid address. 
*/ ++ if (object < (void *)PAGE_SIZE || !virt_addr_valid(object)) ++ return false; + slab = virt_to_slab(object); +- if (WARN_ON_ONCE(!slab)) { +- pr_cont(" non-slab memory.\n"); +- return; +- } ++ if (!slab) ++ return false; ++ + kmem_obj_info(&kp, object, slab); + if (kp.kp_slab_cache) + pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name); +@@ -621,6 +601,7 @@ void kmem_dump_obj(void *object) + pr_info(" %pS\n", kp.kp_free_stack[i]); + } + ++ return true; + } + EXPORT_SYMBOL_GPL(kmem_dump_obj); + #endif +diff --git a/mm/util.c b/mm/util.c +index be798981acc7d..2d5a309c4e548 100644 +--- a/mm/util.c ++++ b/mm/util.c +@@ -1070,10 +1070,8 @@ void mem_dump_obj(void *object) + { + const char *type; + +- if (kmem_valid_obj(object)) { +- kmem_dump_obj(object); ++ if (kmem_dump_obj(object)) + return; +- } + + if (vmalloc_dump_obj(object)) + return; +diff --git a/mm/vmalloc.c b/mm/vmalloc.c +index 078f6b53f8d50..732ff66d1b513 100644 +--- a/mm/vmalloc.c ++++ b/mm/vmalloc.c +@@ -3079,15 +3079,8 @@ vm_area_alloc_pages(gfp_t gfp, int nid, + page = alloc_pages(alloc_gfp, order); + else + page = alloc_pages_node(nid, alloc_gfp, order); +- if (unlikely(!page)) { +- if (!nofail) +- break; +- +- /* fall back to the zero order allocations */ +- alloc_gfp |= __GFP_NOFAIL; +- order = 0; +- continue; +- } ++ if (unlikely(!page)) ++ break; + + /* + * Higher order allocations must be able to be treated as +diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c +index 5a6a49885ab66..a660c428e2207 100644 +--- a/net/bluetooth/bnep/core.c ++++ b/net/bluetooth/bnep/core.c +@@ -385,7 +385,8 @@ static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) + + case BNEP_COMPRESSED_DST_ONLY: + __skb_put_data(nskb, skb_mac_header(skb), ETH_ALEN); +- __skb_put_data(nskb, s->eh.h_source, ETH_ALEN + 2); ++ __skb_put_data(nskb, s->eh.h_source, ETH_ALEN); ++ put_unaligned(s->eh.h_proto, (__be16 *)__skb_put(nskb, 2)); + break; + + case BNEP_GENERAL: +diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c +index 9c670348fac42..dc1c07c7d4ff9 100644 +--- a/net/bluetooth/hci_conn.c ++++ b/net/bluetooth/hci_conn.c +@@ -299,6 +299,13 @@ static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec) + __u8 vnd_len, *vnd_data = NULL; + struct hci_op_configure_data_path *cmd = NULL; + ++ if (!codec->data_path || !hdev->get_codec_config_data) ++ return 0; ++ ++ /* Do not take me as error */ ++ if (!hdev->get_codec_config_data) ++ return 0; ++ + err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len, + &vnd_data); + if (err < 0) +@@ -344,9 +351,7 @@ static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data) + + bt_dev_dbg(hdev, "hcon %p", conn); + +- /* for offload use case, codec needs to configured before opening SCO */ +- if (conn->codec.data_path) +- configure_datapath_sync(hdev, &conn->codec); ++ configure_datapath_sync(hdev, &conn->codec); + + conn->state = BT_CONNECT; + conn->out = true; +diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c +index c137f85a7fed7..e660b3d661dae 100644 +--- a/net/bluetooth/hci_core.c ++++ b/net/bluetooth/hci_core.c +@@ -3637,19 +3637,19 @@ static void hci_sched_le(struct hci_dev *hdev) + { + struct hci_chan *chan; + struct sk_buff *skb; +- int quote, cnt, tmp; ++ int quote, *cnt, tmp; + + BT_DBG("%s", hdev->name); + + if (!hci_conn_num(hdev, LE_LINK)) + return; + +- cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt; ++ cnt = hdev->le_pkts ? 
&hdev->le_cnt : &hdev->acl_cnt; + +- __check_timeout(hdev, cnt, LE_LINK); ++ __check_timeout(hdev, *cnt, LE_LINK); + +- tmp = cnt; +- while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) { ++ tmp = *cnt; ++ while (*cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) { + u32 priority = (skb_peek(&chan->data_q))->priority; + while (quote-- && (skb = skb_peek(&chan->data_q))) { + BT_DBG("chan %p skb %p len %d priority %u", chan, skb, +@@ -3664,7 +3664,7 @@ static void hci_sched_le(struct hci_dev *hdev) + hci_send_frame(hdev, skb); + hdev->le_last_tx = jiffies; + +- cnt--; ++ (*cnt)--; + chan->sent++; + chan->conn->sent++; + +@@ -3674,12 +3674,7 @@ static void hci_sched_le(struct hci_dev *hdev) + } + } + +- if (hdev->le_pkts) +- hdev->le_cnt = cnt; +- else +- hdev->acl_cnt = cnt; +- +- if (cnt != tmp) ++ if (*cnt != tmp) + hci_prio_recalculate(hdev, LE_LINK); + } + +diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c +index 0ca6593a029c0..3533ac679e42c 100644 +--- a/net/bluetooth/mgmt.c ++++ b/net/bluetooth/mgmt.c +@@ -3449,6 +3449,10 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, + * will be kept and this function does nothing. + */ + p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type); ++ if (!p) { ++ err = -EIO; ++ goto unlock; ++ } + + if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT) + p->auto_connect = HCI_AUTO_CONN_DISABLED; +diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c +index 37f95ea8c7db5..fa3986cfd5266 100644 +--- a/net/bluetooth/smp.c ++++ b/net/bluetooth/smp.c +@@ -915,7 +915,7 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth, + * Confirms and the responder Enters the passkey. + */ + if (smp->method == OVERLAP) { +- if (hcon->role == HCI_ROLE_MASTER) ++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) + smp->method = CFM_PASSKEY; + else + smp->method = REQ_PASSKEY; +@@ -965,7 +965,7 @@ static u8 smp_confirm(struct smp_chan *smp) + + smp_send_cmd(smp->conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp); + +- if (conn->hcon->out) ++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM); + else + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM); +@@ -981,7 +981,8 @@ static u8 smp_random(struct smp_chan *smp) + int ret; + + bt_dev_dbg(conn->hcon->hdev, "conn %p %s", conn, +- conn->hcon->out ? "initiator" : "responder"); ++ test_bit(SMP_FLAG_INITIATOR, &smp->flags) ? 
"initiator" : ++ "responder"); + + ret = smp_c1(smp->tk, smp->rrnd, smp->preq, smp->prsp, + hcon->init_addr_type, &hcon->init_addr, +@@ -995,7 +996,7 @@ static u8 smp_random(struct smp_chan *smp) + return SMP_CONFIRM_FAILED; + } + +- if (hcon->out) { ++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) { + u8 stk[16]; + __le64 rand = 0; + __le16 ediv = 0; +@@ -1257,14 +1258,15 @@ static void smp_distribute_keys(struct smp_chan *smp) + rsp = (void *) &smp->prsp[1]; + + /* The responder sends its keys first */ +- if (hcon->out && (smp->remote_key_dist & KEY_DIST_MASK)) { ++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags) && ++ (smp->remote_key_dist & KEY_DIST_MASK)) { + smp_allow_key_dist(smp); + return; + } + + req = (void *) &smp->preq[1]; + +- if (hcon->out) { ++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) { + keydist = &rsp->init_key_dist; + *keydist &= req->init_key_dist; + } else { +@@ -1433,7 +1435,7 @@ static int sc_mackey_and_ltk(struct smp_chan *smp, u8 mackey[16], u8 ltk[16]) + struct hci_conn *hcon = smp->conn->hcon; + u8 *na, *nb, a[7], b[7]; + +- if (hcon->out) { ++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) { + na = smp->prnd; + nb = smp->rrnd; + } else { +@@ -1461,7 +1463,7 @@ static void sc_dhkey_check(struct smp_chan *smp) + a[6] = hcon->init_addr_type; + b[6] = hcon->resp_addr_type; + +- if (hcon->out) { ++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) { + local_addr = a; + remote_addr = b; + memcpy(io_cap, &smp->preq[1], 3); +@@ -1540,7 +1542,7 @@ static u8 sc_passkey_round(struct smp_chan *smp, u8 smp_op) + /* The round is only complete when the initiator + * receives pairing random. + */ +- if (!hcon->out) { ++ if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags)) { + smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, + sizeof(smp->prnd), smp->prnd); + if (smp->passkey_round == 20) +@@ -1568,7 +1570,7 @@ static u8 sc_passkey_round(struct smp_chan *smp, u8 smp_op) + + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM); + +- if (hcon->out) { ++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) { + smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, + sizeof(smp->prnd), smp->prnd); + return 0; +@@ -1579,7 +1581,7 @@ static u8 sc_passkey_round(struct smp_chan *smp, u8 smp_op) + case SMP_CMD_PUBLIC_KEY: + default: + /* Initiating device starts the round */ +- if (!hcon->out) ++ if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags)) + return 0; + + bt_dev_dbg(hdev, "Starting passkey round %u", +@@ -1624,7 +1626,7 @@ static int sc_user_reply(struct smp_chan *smp, u16 mgmt_op, __le32 passkey) + } + + /* Initiator sends DHKey check first */ +- if (hcon->out) { ++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) { + sc_dhkey_check(smp); + SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK); + } else if (test_and_clear_bit(SMP_FLAG_DHKEY_PENDING, &smp->flags)) { +@@ -1747,7 +1749,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) + struct smp_cmd_pairing rsp, *req = (void *) skb->data; + struct l2cap_chan *chan = conn->smp; + struct hci_dev *hdev = conn->hcon->hdev; +- struct smp_chan *smp; ++ struct smp_chan *smp = chan->data; + u8 key_size, auth, sec_level; + int ret; + +@@ -1756,16 +1758,14 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) + if (skb->len < sizeof(*req)) + return SMP_INVALID_PARAMS; + +- if (conn->hcon->role != HCI_ROLE_SLAVE) ++ if (smp && test_bit(SMP_FLAG_INITIATOR, &smp->flags)) + return SMP_CMD_NOTSUPP; + +- if (!chan->data) ++ if (!smp) { + smp = smp_chan_create(conn); +- else +- smp = chan->data; +- +- if (!smp) +- return SMP_UNSPECIFIED; ++ if 
(!smp) ++ return SMP_UNSPECIFIED; ++ } + + /* We didn't start the pairing, so match remote */ + auth = req->auth_req & AUTH_REQ_MASK(hdev); +@@ -1947,7 +1947,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) + if (skb->len < sizeof(*rsp)) + return SMP_INVALID_PARAMS; + +- if (conn->hcon->role != HCI_ROLE_MASTER) ++ if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags)) + return SMP_CMD_NOTSUPP; + + skb_pull(skb, sizeof(*rsp)); +@@ -2042,7 +2042,7 @@ static u8 sc_check_confirm(struct smp_chan *smp) + if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY) + return sc_passkey_round(smp, SMP_CMD_PAIRING_CONFIRM); + +- if (conn->hcon->out) { ++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) { + smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), + smp->prnd); + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM); +@@ -2064,7 +2064,7 @@ static int fixup_sc_false_positive(struct smp_chan *smp) + u8 auth; + + /* The issue is only observed when we're in responder role */ +- if (hcon->out) ++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) + return SMP_UNSPECIFIED; + + if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) { +@@ -2100,7 +2100,8 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb) + struct hci_dev *hdev = hcon->hdev; + + bt_dev_dbg(hdev, "conn %p %s", conn, +- hcon->out ? "initiator" : "responder"); ++ test_bit(SMP_FLAG_INITIATOR, &smp->flags) ? "initiator" : ++ "responder"); + + if (skb->len < sizeof(smp->pcnf)) + return SMP_INVALID_PARAMS; +@@ -2122,7 +2123,7 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb) + return ret; + } + +- if (conn->hcon->out) { ++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) { + smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), + smp->prnd); + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM); +@@ -2157,7 +2158,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb) + if (!test_bit(SMP_FLAG_SC, &smp->flags)) + return smp_random(smp); + +- if (hcon->out) { ++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) { + pkax = smp->local_pk; + pkbx = smp->remote_pk; + na = smp->prnd; +@@ -2170,7 +2171,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb) + } + + if (smp->method == REQ_OOB) { +- if (!hcon->out) ++ if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags)) + smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, + sizeof(smp->prnd), smp->prnd); + SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK); +@@ -2181,7 +2182,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb) + if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY) + return sc_passkey_round(smp, SMP_CMD_PAIRING_RANDOM); + +- if (hcon->out) { ++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) { + u8 cfm[16]; + + err = smp_f4(smp->tfm_cmac, smp->remote_pk, smp->local_pk, +@@ -2222,7 +2223,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb) + return SMP_UNSPECIFIED; + + if (smp->method == REQ_OOB) { +- if (hcon->out) { ++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) { + sc_dhkey_check(smp); + SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK); + } +@@ -2296,10 +2297,27 @@ bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level, + return false; + } + ++static void smp_send_pairing_req(struct smp_chan *smp, __u8 auth) ++{ ++ struct smp_cmd_pairing cp; ++ ++ if (smp->conn->hcon->type == ACL_LINK) ++ build_bredr_pairing_cmd(smp, &cp, NULL); ++ else ++ build_pairing_cmd(smp->conn, &cp, NULL, auth); ++ ++ smp->preq[0] = 
SMP_CMD_PAIRING_REQ; ++ memcpy(&smp->preq[1], &cp, sizeof(cp)); ++ ++ smp_send_cmd(smp->conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp); ++ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP); ++ ++ set_bit(SMP_FLAG_INITIATOR, &smp->flags); ++} ++ + static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) + { + struct smp_cmd_security_req *rp = (void *) skb->data; +- struct smp_cmd_pairing cp; + struct hci_conn *hcon = conn->hcon; + struct hci_dev *hdev = hcon->hdev; + struct smp_chan *smp; +@@ -2348,16 +2366,20 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) + + skb_pull(skb, sizeof(*rp)); + +- memset(&cp, 0, sizeof(cp)); +- build_pairing_cmd(conn, &cp, NULL, auth); ++ smp_send_pairing_req(smp, auth); + +- smp->preq[0] = SMP_CMD_PAIRING_REQ; +- memcpy(&smp->preq[1], &cp, sizeof(cp)); ++ return 0; ++} + +- smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp); +- SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP); ++static void smp_send_security_req(struct smp_chan *smp, __u8 auth) ++{ ++ struct smp_cmd_security_req cp; + +- return 0; ++ cp.auth_req = auth; ++ smp_send_cmd(smp->conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp); ++ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_REQ); ++ ++ clear_bit(SMP_FLAG_INITIATOR, &smp->flags); + } + + int smp_conn_security(struct hci_conn *hcon, __u8 sec_level) +@@ -2428,23 +2450,11 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level) + authreq |= SMP_AUTH_MITM; + } + +- if (hcon->role == HCI_ROLE_MASTER) { +- struct smp_cmd_pairing cp; +- +- build_pairing_cmd(conn, &cp, NULL, authreq); +- smp->preq[0] = SMP_CMD_PAIRING_REQ; +- memcpy(&smp->preq[1], &cp, sizeof(cp)); +- +- smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp); +- SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP); +- } else { +- struct smp_cmd_security_req cp; +- cp.auth_req = authreq; +- smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp); +- SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_REQ); +- } ++ if (hcon->role == HCI_ROLE_MASTER) ++ smp_send_pairing_req(smp, authreq); ++ else ++ smp_send_security_req(smp, authreq); + +- set_bit(SMP_FLAG_INITIATOR, &smp->flags); + ret = 0; + + unlock: +@@ -2695,8 +2705,6 @@ static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb) + + static u8 sc_select_method(struct smp_chan *smp) + { +- struct l2cap_conn *conn = smp->conn; +- struct hci_conn *hcon = conn->hcon; + struct smp_cmd_pairing *local, *remote; + u8 local_mitm, remote_mitm, local_io, remote_io, method; + +@@ -2709,7 +2717,7 @@ static u8 sc_select_method(struct smp_chan *smp) + * the "struct smp_cmd_pairing" from them we need to skip the + * first byte which contains the opcode. + */ +- if (hcon->out) { ++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) { + local = (void *) &smp->preq[1]; + remote = (void *) &smp->prsp[1]; + } else { +@@ -2778,7 +2786,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb) + /* Non-initiating device sends its public key after receiving + * the key from the initiating device. 
+ */ +- if (!hcon->out) { ++ if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags)) { + err = sc_send_public_key(smp); + if (err) + return err; +@@ -2840,7 +2848,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb) + } + + if (smp->method == REQ_OOB) { +- if (hcon->out) ++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) + smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, + sizeof(smp->prnd), smp->prnd); + +@@ -2849,7 +2857,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb) + return 0; + } + +- if (hcon->out) ++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM); + + if (smp->method == REQ_PASSKEY) { +@@ -2864,7 +2872,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb) + /* The Initiating device waits for the non-initiating device to + * send the confirm value. + */ +- if (conn->hcon->out) ++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) + return 0; + + err = smp_f4(smp->tfm_cmac, smp->local_pk, smp->remote_pk, smp->prnd, +@@ -2898,7 +2906,7 @@ static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb) + a[6] = hcon->init_addr_type; + b[6] = hcon->resp_addr_type; + +- if (hcon->out) { ++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) { + local_addr = a; + remote_addr = b; + memcpy(io_cap, &smp->prsp[1], 3); +@@ -2923,7 +2931,7 @@ static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb) + if (crypto_memneq(check->e, e, 16)) + return SMP_DHKEY_CHECK_FAILED; + +- if (!hcon->out) { ++ if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags)) { + if (test_bit(SMP_FLAG_WAIT_USER, &smp->flags)) { + set_bit(SMP_FLAG_DHKEY_PENDING, &smp->flags); + return 0; +@@ -2935,7 +2943,7 @@ static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb) + + sc_add_ltk(smp); + +- if (hcon->out) { ++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) { + hci_le_start_enc(hcon, 0, 0, smp->tk, smp->enc_key_size); + hcon->enc_key_size = smp->enc_key_size; + } +@@ -3084,7 +3092,6 @@ static void bredr_pairing(struct l2cap_chan *chan) + struct l2cap_conn *conn = chan->conn; + struct hci_conn *hcon = conn->hcon; + struct hci_dev *hdev = hcon->hdev; +- struct smp_cmd_pairing req; + struct smp_chan *smp; + + bt_dev_dbg(hdev, "chan %p", chan); +@@ -3136,14 +3143,7 @@ static void bredr_pairing(struct l2cap_chan *chan) + + bt_dev_dbg(hdev, "starting SMP over BR/EDR"); + +- /* Prepare and send the BR/EDR SMP Pairing Request */ +- build_bredr_pairing_cmd(smp, &req, NULL); +- +- smp->preq[0] = SMP_CMD_PAIRING_REQ; +- memcpy(&smp->preq[1], &req, sizeof(req)); +- +- smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(req), &req); +- SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP); ++ smp_send_pairing_req(smp, 0x00); + } + + static void smp_resume_cb(struct l2cap_chan *chan) +diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c +index d848c84ed030d..68d5538613032 100644 +--- a/net/bridge/br_netfilter_hooks.c ++++ b/net/bridge/br_netfilter_hooks.c +@@ -618,8 +618,12 @@ static unsigned int br_nf_local_in(void *priv, + if (likely(nf_ct_is_confirmed(ct))) + return NF_ACCEPT; + ++ if (WARN_ON_ONCE(refcount_read(&nfct->use) != 1)) { ++ nf_reset_ct(skb); ++ return NF_ACCEPT; ++ } ++ + WARN_ON_ONCE(skb_shared(skb)); +- WARN_ON_ONCE(refcount_read(&nfct->use) != 1); + + /* We can't call nf_confirm here, it would create a dependency + * on nf_conntrack module. 
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c +index 01be07b485fad..a37143d181f95 100644 +--- a/net/core/sock_map.c ++++ b/net/core/sock_map.c +@@ -411,9 +411,6 @@ static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test, + struct sock *sk; + int err = 0; + +- if (irqs_disabled()) +- return -EOPNOTSUPP; /* locks here are hardirq-unsafe */ +- + spin_lock_bh(&stab->lock); + sk = *psk; + if (!sk_test || sk_test == sk) +@@ -936,9 +933,6 @@ static long sock_hash_delete_elem(struct bpf_map *map, void *key) + struct bpf_shtab_elem *elem; + int ret = -ENOENT; + +- if (irqs_disabled()) +- return -EOPNOTSUPP; /* locks here are hardirq-unsafe */ +- + hash = sock_hash_bucket_hash(key, key_size); + bucket = sock_hash_select_bucket(htab, hash); + +diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c +index 65a6733fc897f..ca31c3b096bbf 100644 +--- a/net/dccp/ipv4.c ++++ b/net/dccp/ipv4.c +@@ -1042,7 +1042,7 @@ static void __net_exit dccp_v4_exit_net(struct net *net) + + static void __net_exit dccp_v4_exit_batch(struct list_head *net_exit_list) + { +- inet_twsk_purge(&dccp_hashinfo, AF_INET); ++ inet_twsk_purge(&dccp_hashinfo); + } + + static struct pernet_operations dccp_v4_ops = { +diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c +index 683e4291b348a..d25e962b18a53 100644 +--- a/net/dccp/ipv6.c ++++ b/net/dccp/ipv6.c +@@ -1122,15 +1122,9 @@ static void __net_exit dccp_v6_exit_net(struct net *net) + inet_ctl_sock_destroy(pn->v6_ctl_sk); + } + +-static void __net_exit dccp_v6_exit_batch(struct list_head *net_exit_list) +-{ +- inet_twsk_purge(&dccp_hashinfo, AF_INET6); +-} +- + static struct pernet_operations dccp_v6_ops = { + .init = dccp_v6_init_net, + .exit = dccp_v6_exit_net, +- .exit_batch = dccp_v6_exit_batch, + .id = &dccp_v6_pernet_id, + .size = sizeof(struct dccp_v6_pernet), + }; +diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c +index 20bf7074d5a67..ff0ae3f6be566 100644 +--- a/net/dsa/tag_ocelot.c ++++ b/net/dsa/tag_ocelot.c +@@ -8,40 +8,6 @@ + #define OCELOT_NAME "ocelot" + #define SEVILLE_NAME "seville" + +-/* If the port is under a VLAN-aware bridge, remove the VLAN header from the +- * payload and move it into the DSA tag, which will make the switch classify +- * the packet to the bridge VLAN. Otherwise, leave the classified VLAN at zero, +- * which is the pvid of standalone and VLAN-unaware bridge ports. +- */ +-static void ocelot_xmit_get_vlan_info(struct sk_buff *skb, struct dsa_port *dp, +- u64 *vlan_tci, u64 *tag_type) +-{ +- struct net_device *br = dsa_port_bridge_dev_get(dp); +- struct vlan_ethhdr *hdr; +- u16 proto, tci; +- +- if (!br || !br_vlan_enabled(br)) { +- *vlan_tci = 0; +- *tag_type = IFH_TAG_TYPE_C; +- return; +- } +- +- hdr = skb_vlan_eth_hdr(skb); +- br_vlan_get_proto(br, &proto); +- +- if (ntohs(hdr->h_vlan_proto) == proto) { +- vlan_remove_tag(skb, &tci); +- *vlan_tci = tci; +- } else { +- rcu_read_lock(); +- br_vlan_get_pvid_rcu(br, &tci); +- rcu_read_unlock(); +- *vlan_tci = tci; +- } +- +- *tag_type = (proto != ETH_P_8021Q) ? IFH_TAG_TYPE_S : IFH_TAG_TYPE_C; +-} +- + static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev, + __be32 ifh_prefix, void **ifh) + { +@@ -53,7 +19,8 @@ static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev, + u32 rew_op = 0; + u64 qos_class; + +- ocelot_xmit_get_vlan_info(skb, dp, &vlan_tci, &tag_type); ++ ocelot_xmit_get_vlan_info(skb, dsa_port_bridge_dev_get(dp), &vlan_tci, ++ &tag_type); + + qos_class = netdev_get_num_tc(netdev) ? 
+ netdev_get_prio_tc_map(netdev, skb->priority) : skb->priority; +diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c +index 757ae3a4e2f1a..fff53144250c5 100644 +--- a/net/ipv4/inet_timewait_sock.c ++++ b/net/ipv4/inet_timewait_sock.c +@@ -279,14 +279,18 @@ void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm) + EXPORT_SYMBOL_GPL(__inet_twsk_schedule); + + /* Remove all non full sockets (TIME_WAIT and NEW_SYN_RECV) for dead netns */ +-void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family) ++void inet_twsk_purge(struct inet_hashinfo *hashinfo) + { ++ struct inet_ehash_bucket *head = &hashinfo->ehash[0]; ++ unsigned int ehash_mask = hashinfo->ehash_mask; + struct hlist_nulls_node *node; + unsigned int slot; + struct sock *sk; + +- for (slot = 0; slot <= hashinfo->ehash_mask; slot++) { +- struct inet_ehash_bucket *head = &hashinfo->ehash[slot]; ++ for (slot = 0; slot <= ehash_mask; slot++, head++) { ++ if (hlist_nulls_empty(&head->chain)) ++ continue; ++ + restart_rcu: + cond_resched(); + rcu_read_lock(); +@@ -298,15 +302,13 @@ void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family) + TCPF_NEW_SYN_RECV)) + continue; + +- if (sk->sk_family != family || +- refcount_read(&sock_net(sk)->ns.count)) ++ if (refcount_read(&sock_net(sk)->ns.count)) + continue; + + if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt))) + continue; + +- if (unlikely(sk->sk_family != family || +- refcount_read(&sock_net(sk)->ns.count))) { ++ if (refcount_read(&sock_net(sk)->ns.count)) { + sock_gen_put(sk); + goto restart; + } +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index d0364cff65c9f..24c7c955dc955 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -243,9 +243,14 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb) + */ + if (unlikely(len != icsk->icsk_ack.rcv_mss)) { + u64 val = (u64)skb->len << TCP_RMEM_TO_WIN_SCALE; ++ u8 old_ratio = tcp_sk(sk)->scaling_ratio; + + do_div(val, skb->truesize); + tcp_sk(sk)->scaling_ratio = val ? val : 1; ++ ++ if (old_ratio != tcp_sk(sk)->scaling_ratio) ++ WRITE_ONCE(tcp_sk(sk)->window_clamp, ++ tcp_win_from_space(sk, sk->sk_rcvbuf)); + } + icsk->icsk_ack.rcv_mss = min_t(unsigned int, len, + tcp_sk(sk)->advmss); +@@ -748,7 +753,8 @@ void tcp_rcv_space_adjust(struct sock *sk) + * + */ + +- if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf)) { ++ if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) && ++ !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { + u64 rcvwin, grow; + int rcvbuf; + +@@ -764,22 +770,12 @@ void tcp_rcv_space_adjust(struct sock *sk) + + rcvbuf = min_t(u64, tcp_space_from_win(sk, rcvwin), + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])); +- if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { +- if (rcvbuf > sk->sk_rcvbuf) { +- WRITE_ONCE(sk->sk_rcvbuf, rcvbuf); +- +- /* Make the window clamp follow along. */ +- WRITE_ONCE(tp->window_clamp, +- tcp_win_from_space(sk, rcvbuf)); +- } +- } else { +- /* Make the window clamp follow along while being bounded +- * by SO_RCVBUF. +- */ +- int clamp = tcp_win_from_space(sk, min(rcvbuf, sk->sk_rcvbuf)); ++ if (rcvbuf > sk->sk_rcvbuf) { ++ WRITE_ONCE(sk->sk_rcvbuf, rcvbuf); + +- if (clamp > tp->window_clamp) +- WRITE_ONCE(tp->window_clamp, clamp); ++ /* Make the window clamp follow along. 
*/ ++ WRITE_ONCE(tp->window_clamp, ++ tcp_win_from_space(sk, rcvbuf)); + } + } + tp->rcvq_space.space = copied; +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c +index 48ec2c1777d45..96d235bcf5cb2 100644 +--- a/net/ipv4/tcp_ipv4.c ++++ b/net/ipv4/tcp_ipv4.c +@@ -94,6 +94,8 @@ EXPORT_SYMBOL(tcp_hashinfo); + + static DEFINE_PER_CPU(struct sock *, ipv4_tcp_sk); + ++static DEFINE_MUTEX(tcp_exit_batch_mutex); ++ + static u32 tcp_v4_init_seq(const struct sk_buff *skb) + { + return secure_tcp_seq(ip_hdr(skb)->daddr, +@@ -3304,13 +3306,25 @@ static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list) + { + struct net *net; + +- tcp_twsk_purge(net_exit_list, AF_INET); ++ /* make sure concurrent calls to tcp_sk_exit_batch from net_cleanup_work ++ * and failed setup_net error unwinding path are serialized. ++ * ++ * tcp_twsk_purge() handles twsk in any dead netns, not just those in ++ * net_exit_list, the thread that dismantles a particular twsk must ++ * do so without other thread progressing to refcount_dec_and_test() of ++ * tcp_death_row.tw_refcount. ++ */ ++ mutex_lock(&tcp_exit_batch_mutex); ++ ++ tcp_twsk_purge(net_exit_list); + + list_for_each_entry(net, net_exit_list, exit_list) { + inet_pernet_hashinfo_free(net->ipv4.tcp_death_row.hashinfo); + WARN_ON_ONCE(!refcount_dec_and_test(&net->ipv4.tcp_death_row.tw_refcount)); + tcp_fastopen_ctx_destroy(net); + } ++ ++ mutex_unlock(&tcp_exit_batch_mutex); + } + + static struct pernet_operations __net_initdata tcp_sk_ops = { +diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c +index 260bfb9ada38d..0c04c460001f1 100644 +--- a/net/ipv4/tcp_minisocks.c ++++ b/net/ipv4/tcp_minisocks.c +@@ -363,7 +363,7 @@ void tcp_twsk_destructor(struct sock *sk) + } + EXPORT_SYMBOL_GPL(tcp_twsk_destructor); + +-void tcp_twsk_purge(struct list_head *net_exit_list, int family) ++void tcp_twsk_purge(struct list_head *net_exit_list) + { + bool purged_once = false; + struct net *net; +@@ -371,14 +371,13 @@ void tcp_twsk_purge(struct list_head *net_exit_list, int family) + list_for_each_entry(net, net_exit_list, exit_list) { + if (net->ipv4.tcp_death_row.hashinfo->pernet) { + /* Even if tw_refcount == 1, we must clean up kernel reqsk */ +- inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo, family); ++ inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo); + } else if (!purged_once) { +- inet_twsk_purge(&tcp_hashinfo, family); ++ inet_twsk_purge(&tcp_hashinfo); + purged_once = true; + } + } + } +-EXPORT_SYMBOL_GPL(tcp_twsk_purge); + + /* Warning : This function is called without sk_listener being locked. + * Be sure to read socket fields once, as their value could change under us. +diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c +index 9cb13a50011ef..f544016e6eb3f 100644 +--- a/net/ipv4/udp_offload.c ++++ b/net/ipv4/udp_offload.c +@@ -279,7 +279,8 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, + return ERR_PTR(-EINVAL); + + if (unlikely(skb_checksum_start(gso_skb) != +- skb_transport_header(gso_skb))) ++ skb_transport_header(gso_skb) && ++ !(skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST))) + return ERR_PTR(-EINVAL); + + if (skb_gso_ok(gso_skb, features | NETIF_F_GSO_ROBUST)) { +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c +index db8d0e1bf69ff..5d8d86c159dc3 100644 +--- a/net/ipv6/ip6_output.c ++++ b/net/ipv6/ip6_output.c +@@ -70,11 +70,15 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff * + + /* Be paranoid, rather than too clever. 
*/ + if (unlikely(hh_len > skb_headroom(skb)) && dev->header_ops) { ++ /* Make sure idev stays alive */ ++ rcu_read_lock(); + skb = skb_expand_head(skb, hh_len); + if (!skb) { + IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS); ++ rcu_read_unlock(); + return -ENOMEM; + } ++ rcu_read_unlock(); + } + + hdr = ipv6_hdr(skb); +@@ -277,11 +281,15 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, + head_room += opt->opt_nflen + opt->opt_flen; + + if (unlikely(head_room > skb_headroom(skb))) { ++ /* Make sure idev stays alive */ ++ rcu_read_lock(); + skb = skb_expand_head(skb, head_room); + if (!skb) { + IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS); ++ rcu_read_unlock(); + return -ENOBUFS; + } ++ rcu_read_unlock(); + } + + if (opt) { +@@ -2023,6 +2031,7 @@ int ip6_send_skb(struct sk_buff *skb) + struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); + int err; + ++ rcu_read_lock(); + err = ip6_local_out(net, skb->sk, skb); + if (err) { + if (err > 0) +@@ -2032,6 +2041,7 @@ int ip6_send_skb(struct sk_buff *skb) + IPSTATS_MIB_OUTDISCARDS); + } + ++ rcu_read_unlock(); + return err; + } + +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c +index 70478027a7af7..97905d4174eca 100644 +--- a/net/ipv6/ip6_tunnel.c ++++ b/net/ipv6/ip6_tunnel.c +@@ -1508,7 +1508,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t) + tdev = __dev_get_by_index(t->net, p->link); + + if (tdev) { +- dev->hard_header_len = tdev->hard_header_len + t_hlen; ++ dev->needed_headroom = tdev->hard_header_len + ++ tdev->needed_headroom + t_hlen; + mtu = min_t(unsigned int, tdev->mtu, IP6_MAX_MTU); + + mtu = mtu - t_hlen; +@@ -1732,7 +1733,9 @@ ip6_tnl_siocdevprivate(struct net_device *dev, struct ifreq *ifr, + int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu) + { + struct ip6_tnl *tnl = netdev_priv(dev); ++ int t_hlen; + ++ t_hlen = tnl->hlen + sizeof(struct ipv6hdr); + if (tnl->parms.proto == IPPROTO_IPV6) { + if (new_mtu < IPV6_MIN_MTU) + return -EINVAL; +@@ -1741,10 +1744,10 @@ int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu) + return -EINVAL; + } + if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) { +- if (new_mtu > IP6_MAX_MTU - dev->hard_header_len) ++ if (new_mtu > IP6_MAX_MTU - dev->hard_header_len - t_hlen) + return -EINVAL; + } else { +- if (new_mtu > IP_MAX_MTU - dev->hard_header_len) ++ if (new_mtu > IP_MAX_MTU - dev->hard_header_len - t_hlen) + return -EINVAL; + } + dev->mtu = new_mtu; +@@ -1890,12 +1893,11 @@ ip6_tnl_dev_init_gen(struct net_device *dev) + t_hlen = t->hlen + sizeof(struct ipv6hdr); + + dev->type = ARPHRD_TUNNEL6; +- dev->hard_header_len = LL_MAX_HEADER + t_hlen; + dev->mtu = ETH_DATA_LEN - t_hlen; + if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) + dev->mtu -= 8; + dev->min_mtu = ETH_MIN_MTU; +- dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len; ++ dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len - t_hlen; + + netdev_hold(dev, &t->dev_tracker, GFP_KERNEL); + netdev_lockdep_set_classes(dev); +diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c +index efbec7ee27d0a..c78b13ea5b196 100644 +--- a/net/ipv6/netfilter/nf_conntrack_reasm.c ++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c +@@ -155,6 +155,10 @@ static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user, + }; + struct inet_frag_queue *q; + ++ if (!(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_MULTICAST | ++ IPV6_ADDR_LINKLOCAL))) ++ key.iif = 0; ++ + q = inet_frag_find(nf_frag->fqdir, &key); + if (!q) + return NULL; +diff 
--git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c +index d0034916d386b..83b48dc2b3ee2 100644 +--- a/net/ipv6/tcp_ipv6.c ++++ b/net/ipv6/tcp_ipv6.c +@@ -2213,15 +2213,9 @@ static void __net_exit tcpv6_net_exit(struct net *net) + inet_ctl_sock_destroy(net->ipv6.tcp_sk); + } + +-static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list) +-{ +- tcp_twsk_purge(net_exit_list, AF_INET6); +-} +- + static struct pernet_operations tcpv6_net_ops = { + .init = tcpv6_net_init, + .exit = tcpv6_net_exit, +- .exit_batch = tcpv6_net_exit_batch, + }; + + int __init tcpv6_init(void) +diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c +index db41eb2d977f2..038e1ba9aec27 100644 +--- a/net/iucv/iucv.c ++++ b/net/iucv/iucv.c +@@ -1090,8 +1090,7 @@ static int iucv_message_receive_iprmdata(struct iucv_path *path, + size = (size < 8) ? size : 8; + for (array = buffer; size > 0; array++) { + copy = min_t(size_t, size, array->length); +- memcpy((u8 *)(addr_t) array->address, +- rmmsg, copy); ++ memcpy(phys_to_virt(array->address), rmmsg, copy); + rmmsg += copy; + size -= copy; + } +diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c +index 41d892bf12cc6..829eb67240a99 100644 +--- a/net/kcm/kcmsock.c ++++ b/net/kcm/kcmsock.c +@@ -754,6 +754,7 @@ static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) + !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR); + int err = -EPIPE; + ++ mutex_lock(&kcm->tx_mutex); + lock_sock(sk); + + /* Per tcp_sendmsg this should be in poll */ +@@ -925,6 +926,7 @@ static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) + KCM_STATS_ADD(kcm->stats.tx_bytes, copied); + + release_sock(sk); ++ mutex_unlock(&kcm->tx_mutex); + return copied; + + out_error: +@@ -950,6 +952,7 @@ static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) + sk->sk_write_space(sk); + + release_sock(sk); ++ mutex_unlock(&kcm->tx_mutex); + return err; + } + +@@ -1203,6 +1206,7 @@ static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux) + spin_unlock_bh(&mux->lock); + + INIT_WORK(&kcm->tx_work, kcm_tx_work); ++ mutex_init(&kcm->tx_mutex); + + spin_lock_bh(&mux->rx_lock); + kcm_rcv_ready(kcm); +diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c +index b6b7726858815..0a69e47f1c55f 100644 +--- a/net/mac80211/agg-tx.c ++++ b/net/mac80211/agg-tx.c +@@ -497,7 +497,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) + { + struct tid_ampdu_tx *tid_tx; + struct ieee80211_local *local = sta->local; +- struct ieee80211_sub_if_data *sdata; ++ struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_ampdu_params params = { + .sta = &sta->sta, + .action = IEEE80211_AMPDU_TX_START, +@@ -525,7 +525,6 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) + */ + synchronize_net(); + +- sdata = sta->sdata; + params.ssn = sta->tid_seq[tid] >> 4; + ret = drv_ampdu_action(local, sdata, ¶ms); + tid_tx->ssn = params.ssn; +@@ -539,9 +538,6 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) + */ + set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state); + } else if (ret) { +- if (!sdata) +- return; +- + ht_dbg(sdata, + "BA request denied - HW unavailable for %pM tid %d\n", + sta->sta.addr, tid); +diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c +index f8af0c3d405ae..d6478fd00badf 100644 +--- a/net/mac80211/driver-ops.c ++++ b/net/mac80211/driver-ops.c +@@ -393,9 +393,6 @@ int drv_ampdu_action(struct ieee80211_local *local, + + might_sleep(); + +- if (!sdata) 
+- return -EIO; +- + sdata = get_bss_sdata(sdata); + if (!check_sdata_in_driver(sdata)) + return -EIO; +diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c +index 52b048807feae..a7c39e895b1e5 100644 +--- a/net/mac80211/iface.c ++++ b/net/mac80211/iface.c +@@ -2315,6 +2315,20 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local) + list_for_each_entry_safe(sdata, tmp, &unreg_list, list) { + bool netdev = sdata->dev; + ++ /* ++ * Remove IP addresses explicitly, since the notifier will ++ * skip the callbacks if wdev->registered is false, since ++ * we can't acquire the wiphy_lock() again there if already ++ * inside this locked section. ++ */ ++ sdata_lock(sdata); ++ sdata->vif.cfg.arp_addr_cnt = 0; ++ if (sdata->vif.type == NL80211_IFTYPE_STATION && ++ sdata->u.mgd.associated) ++ ieee80211_vif_cfg_change_notify(sdata, ++ BSS_CHANGED_ARP_FILTER); ++ sdata_unlock(sdata); ++ + list_del(&sdata->list); + cfg80211_unregister_wdev(&sdata->wdev); + +diff --git a/net/mac80211/main.c b/net/mac80211/main.c +index e0c701c5e5f93..066424e62ff09 100644 +--- a/net/mac80211/main.c ++++ b/net/mac80211/main.c +@@ -443,7 +443,7 @@ static int ieee80211_ifa_changed(struct notifier_block *nb, + if (!wdev) + return NOTIFY_DONE; + +- if (wdev->wiphy != local->hw.wiphy) ++ if (wdev->wiphy != local->hw.wiphy || !wdev->registered) + return NOTIFY_DONE; + + sdata = IEEE80211_DEV_TO_SUB_IF(ndev); +@@ -458,6 +458,25 @@ static int ieee80211_ifa_changed(struct notifier_block *nb, + return NOTIFY_DONE; + + ifmgd = &sdata->u.mgd; ++ ++ /* ++ * The nested here is needed to convince lockdep that this is ++ * all OK. Yes, we lock the wiphy mutex here while we already ++ * hold the notifier rwsem, that's the normal case. And yes, ++ * we also acquire the notifier rwsem again when unregistering ++ * a netdev while we already hold the wiphy mutex, so it does ++ * look like a typical ABBA deadlock. ++ * ++ * However, both of these things happen with the RTNL held ++ * already. Therefore, they can't actually happen, since the ++ * lock orders really are ABC and ACB, which is fine due to ++ * the RTNL (A). ++ * ++ * We still need to prevent recursion, which is accomplished ++ * by the !wdev->registered check above. ++ */ ++ mutex_lock_nested(&local->hw.wiphy->mtx, 1); ++ __acquire(&local->hw.wiphy->mtx); + sdata_lock(sdata); + + /* Copy the addresses to the vif config list */ +@@ -476,6 +495,7 @@ static int ieee80211_ifa_changed(struct notifier_block *nb, + ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_ARP_FILTER); + + sdata_unlock(sdata); ++ wiphy_unlock(local->hw.wiphy); + + return NOTIFY_OK; + } +diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c +index 984f8f67492fd..5d71e8d084c45 100644 +--- a/net/mac80211/sta_info.c ++++ b/net/mac80211/sta_info.c +@@ -1284,6 +1284,8 @@ static int _sta_info_move_state(struct sta_info *sta, + enum ieee80211_sta_state new_state, + bool recalc) + { ++ struct ieee80211_local *local = sta->local; ++ + might_sleep(); + + if (sta->sta_state == new_state) +@@ -1359,6 +1361,24 @@ static int _sta_info_move_state(struct sta_info *sta, + } else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { + ieee80211_vif_dec_num_mcast(sta->sdata); + clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags); ++ ++ /* ++ * If we have encryption offload, flush (station) queues ++ * (after ensuring concurrent TX completed) so we won't ++ * transmit anything later unencrypted if/when keys are ++ * also removed, which might otherwise happen depending ++ * on how the hardware offload works. 
++ */ ++ if (local->ops->set_key) { ++ synchronize_net(); ++ if (local->ops->flush_sta) ++ drv_flush_sta(local, sta->sdata, sta); ++ else ++ ieee80211_flush_queues(local, ++ sta->sdata, ++ false); ++ } ++ + ieee80211_clear_fast_xmit(sta); + ieee80211_clear_fast_rx(sta); + } +@@ -1402,6 +1422,20 @@ static void __sta_info_destroy_part2(struct sta_info *sta, bool recalc) + * after _part1 and before _part2! + */ + ++ /* ++ * There's a potential race in _part1 where we set WLAN_STA_BLOCK_BA ++ * but someone might have just gotten past a check, and not yet into ++ * queuing the work/creating the data/etc. ++ * ++ * Do another round of destruction so that the worker is certainly ++ * canceled before we later free the station. ++ * ++ * Since this is after synchronize_rcu()/synchronize_net() we're now ++ * certain that nobody can actually hold a reference to the STA and ++ * be calling e.g. ieee80211_start_tx_ba_session(). ++ */ ++ ieee80211_sta_tear_down_BA_sessions(sta, AGG_STOP_DESTROY_STA); ++ + might_sleep(); + lockdep_assert_held(&local->sta_mtx); + +@@ -1410,18 +1444,6 @@ static void __sta_info_destroy_part2(struct sta_info *sta, bool recalc) + WARN_ON_ONCE(ret); + } + +- /* Flush queues before removing keys, as that might remove them +- * from hardware, and then depending on the offload method, any +- * frames sitting on hardware queues might be sent out without +- * any encryption at all. +- */ +- if (local->ops->set_key) { +- if (local->ops->flush_sta) +- drv_flush_sta(local, sta->sdata, sta); +- else +- ieee80211_flush_queues(local, sta->sdata, false); +- } +- + /* now keys can no longer be reached */ + ieee80211_free_sta_keys(local, sta); + +diff --git a/net/mctp/test/route-test.c b/net/mctp/test/route-test.c +index 92ea4158f7fc4..a944490a724d3 100644 +--- a/net/mctp/test/route-test.c ++++ b/net/mctp/test/route-test.c +@@ -354,7 +354,7 @@ static void mctp_test_route_input_sk(struct kunit *test) + + skb2 = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc); + KUNIT_EXPECT_NOT_ERR_OR_NULL(test, skb2); +- KUNIT_EXPECT_EQ(test, skb->len, 1); ++ KUNIT_EXPECT_EQ(test, skb2->len, 1); + + skb_free_datagram(sock->sk, skb2); + +diff --git a/net/mptcp/diag.c b/net/mptcp/diag.c +index 7017dd60659dc..b2199cc282384 100644 +--- a/net/mptcp/diag.c ++++ b/net/mptcp/diag.c +@@ -95,7 +95,7 @@ static size_t subflow_get_info_size(const struct sock *sk) + nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ */ + nla_total_size_64bit(8) + /* MPTCP_SUBFLOW_ATTR_MAP_SEQ */ + nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_MAP_SFSEQ */ +- nla_total_size(2) + /* MPTCP_SUBFLOW_ATTR_SSN_OFFSET */ ++ nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_SSN_OFFSET */ + nla_total_size(2) + /* MPTCP_SUBFLOW_ATTR_MAP_DATALEN */ + nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_FLAGS */ + nla_total_size(1) + /* MPTCP_SUBFLOW_ATTR_ID_REM */ +diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c +index cf70a376398be..3346c615f3efe 100644 +--- a/net/mptcp/pm.c ++++ b/net/mptcp/pm.c +@@ -61,16 +61,6 @@ int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_ + return 0; + } + +-int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list) +-{ +- pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr); +- +- spin_lock_bh(&msk->pm.lock); +- mptcp_pm_nl_rm_subflow_received(msk, rm_list); +- spin_unlock_bh(&msk->pm.lock); +- return 0; +-} +- + /* path manager event handlers */ + + void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side) +@@ -445,9 +435,6 @@ int 
mptcp_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id + *flags = 0; + *ifindex = 0; + +- if (!id) +- return 0; +- + if (mptcp_pm_is_userspace(msk)) + return mptcp_userspace_pm_get_flags_and_ifindex_by_id(msk, id, flags, ifindex); + return mptcp_pm_nl_get_flags_and_ifindex_by_id(msk, id, flags, ifindex); +diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c +index 2c49182c674f3..441af74621465 100644 +--- a/net/mptcp/pm_netlink.c ++++ b/net/mptcp/pm_netlink.c +@@ -148,11 +148,13 @@ static bool lookup_subflow_by_daddr(const struct list_head *list, + return false; + } + +-static struct mptcp_pm_addr_entry * ++static bool + select_local_address(const struct pm_nl_pernet *pernet, +- const struct mptcp_sock *msk) ++ const struct mptcp_sock *msk, ++ struct mptcp_pm_addr_entry *new_entry) + { +- struct mptcp_pm_addr_entry *entry, *ret = NULL; ++ struct mptcp_pm_addr_entry *entry; ++ bool found = false; + + msk_owned_by_me(msk); + +@@ -164,17 +166,21 @@ select_local_address(const struct pm_nl_pernet *pernet, + if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap)) + continue; + +- ret = entry; ++ *new_entry = *entry; ++ found = true; + break; + } + rcu_read_unlock(); +- return ret; ++ ++ return found; + } + +-static struct mptcp_pm_addr_entry * +-select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk) ++static bool ++select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk, ++ struct mptcp_pm_addr_entry *new_entry) + { +- struct mptcp_pm_addr_entry *entry, *ret = NULL; ++ struct mptcp_pm_addr_entry *entry; ++ bool found = false; + + rcu_read_lock(); + /* do not keep any additional per socket state, just signal +@@ -189,11 +195,13 @@ select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk) + if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) + continue; + +- ret = entry; ++ *new_entry = *entry; ++ found = true; + break; + } + rcu_read_unlock(); +- return ret; ++ ++ return found; + } + + unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk) +@@ -520,9 +528,10 @@ __lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info, + + static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk) + { +- struct mptcp_pm_addr_entry *local, *signal_and_subflow = NULL; + struct sock *sk = (struct sock *)msk; ++ struct mptcp_pm_addr_entry local; + unsigned int add_addr_signal_max; ++ bool signal_and_subflow = false; + unsigned int local_addr_max; + struct pm_nl_pernet *pernet; + unsigned int subflows_max; +@@ -573,23 +582,22 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk) + if (msk->pm.addr_signal & BIT(MPTCP_ADD_ADDR_SIGNAL)) + return; + +- local = select_signal_address(pernet, msk); +- if (!local) ++ if (!select_signal_address(pernet, msk, &local)) + goto subflow; + + /* If the alloc fails, we are on memory pressure, not worth + * continuing, and trying to create subflows. 
+ */ +- if (!mptcp_pm_alloc_anno_list(msk, &local->addr)) ++ if (!mptcp_pm_alloc_anno_list(msk, &local.addr)) + return; + +- __clear_bit(local->addr.id, msk->pm.id_avail_bitmap); ++ __clear_bit(local.addr.id, msk->pm.id_avail_bitmap); + msk->pm.add_addr_signaled++; +- mptcp_pm_announce_addr(msk, &local->addr, false); ++ mptcp_pm_announce_addr(msk, &local.addr, false); + mptcp_pm_nl_addr_send_ack(msk); + +- if (local->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) +- signal_and_subflow = local; ++ if (local.flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) ++ signal_and_subflow = true; + } + + subflow: +@@ -600,26 +608,22 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk) + bool fullmesh; + int i, nr; + +- if (signal_and_subflow) { +- local = signal_and_subflow; +- signal_and_subflow = NULL; +- } else { +- local = select_local_address(pernet, msk); +- if (!local) +- break; +- } ++ if (signal_and_subflow) ++ signal_and_subflow = false; ++ else if (!select_local_address(pernet, msk, &local)) ++ break; + +- fullmesh = !!(local->flags & MPTCP_PM_ADDR_FLAG_FULLMESH); ++ fullmesh = !!(local.flags & MPTCP_PM_ADDR_FLAG_FULLMESH); + + msk->pm.local_addr_used++; +- __clear_bit(local->addr.id, msk->pm.id_avail_bitmap); +- nr = fill_remote_addresses_vec(msk, &local->addr, fullmesh, addrs); ++ __clear_bit(local.addr.id, msk->pm.id_avail_bitmap); ++ nr = fill_remote_addresses_vec(msk, &local.addr, fullmesh, addrs); + if (nr == 0) + continue; + + spin_unlock_bh(&msk->pm.lock); + for (i = 0; i < nr; i++) +- __mptcp_subflow_connect(sk, &local->addr, &addrs[i]); ++ __mptcp_subflow_connect(sk, &local.addr, &addrs[i]); + spin_lock_bh(&msk->pm.lock); + } + mptcp_pm_nl_check_work_pending(msk); +@@ -644,6 +648,7 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk, + { + struct sock *sk = (struct sock *)msk; + struct mptcp_pm_addr_entry *entry; ++ struct mptcp_addr_info mpc_addr; + struct pm_nl_pernet *pernet; + unsigned int subflows_max; + int i = 0; +@@ -651,6 +656,8 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk, + pernet = pm_nl_get_pernet_from_msk(msk); + subflows_max = mptcp_pm_get_subflows_max(msk); + ++ mptcp_local_address((struct sock_common *)msk, &mpc_addr); ++ + rcu_read_lock(); + list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) { + if (!(entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH)) +@@ -661,7 +668,13 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk, + + if (msk->pm.subflows < subflows_max) { + msk->pm.subflows++; +- addrs[i++] = entry->addr; ++ addrs[i] = entry->addr; ++ ++ /* Special case for ID0: set the correct ID */ ++ if (mptcp_addresses_equal(&entry->addr, &mpc_addr, entry->addr.port)) ++ addrs[i].id = 0; ++ ++ i++; + } + } + rcu_read_unlock(); +@@ -837,25 +850,27 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk, + mptcp_close_ssk(sk, ssk, subflow); + spin_lock_bh(&msk->pm.lock); + +- removed = true; ++ removed |= subflow->request_join; + if (rm_type == MPTCP_MIB_RMSUBFLOW) + __MPTCP_INC_STATS(sock_net(sk), rm_type); + } +- if (rm_type == MPTCP_MIB_RMSUBFLOW) +- __set_bit(rm_id ? 
rm_id : msk->mpc_endpoint_id, msk->pm.id_avail_bitmap); +- else if (rm_type == MPTCP_MIB_RMADDR) ++ ++ if (rm_type == MPTCP_MIB_RMADDR) + __MPTCP_INC_STATS(sock_net(sk), rm_type); ++ + if (!removed) + continue; + + if (!mptcp_pm_is_kernel(msk)) + continue; + +- if (rm_type == MPTCP_MIB_RMADDR) { +- msk->pm.add_addr_accepted--; +- WRITE_ONCE(msk->pm.accept_addr, true); +- } else if (rm_type == MPTCP_MIB_RMSUBFLOW) { +- msk->pm.local_addr_used--; ++ if (rm_type == MPTCP_MIB_RMADDR && rm_id && ++ !WARN_ON_ONCE(msk->pm.add_addr_accepted == 0)) { ++ /* Note: if the subflow has been closed before, this ++ * add_addr_accepted counter will not be decremented. ++ */ ++ if (--msk->pm.add_addr_accepted < mptcp_pm_get_add_addr_accept_max(msk)) ++ WRITE_ONCE(msk->pm.accept_addr, true); + } + } + } +@@ -865,8 +880,8 @@ static void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk) + mptcp_pm_nl_rm_addr_or_subflow(msk, &msk->pm.rm_list_rx, MPTCP_MIB_RMADDR); + } + +-void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk, +- const struct mptcp_rm_list *rm_list) ++static void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk, ++ const struct mptcp_rm_list *rm_list) + { + mptcp_pm_nl_rm_addr_or_subflow(msk, rm_list, MPTCP_MIB_RMSUBFLOW); + } +@@ -1424,6 +1439,10 @@ int mptcp_pm_nl_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int + struct sock *sk = (struct sock *)msk; + struct net *net = sock_net(sk); + ++ /* No entries with ID 0 */ ++ if (id == 0) ++ return 0; ++ + rcu_read_lock(); + entry = __lookup_addr_by_id(pm_nl_get_pernet(net), id); + if (entry) { +@@ -1462,13 +1481,24 @@ static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk, + ret = remove_anno_list_by_saddr(msk, addr); + if (ret || force) { + spin_lock_bh(&msk->pm.lock); +- msk->pm.add_addr_signaled -= ret; ++ if (ret) { ++ __set_bit(addr->id, msk->pm.id_avail_bitmap); ++ msk->pm.add_addr_signaled--; ++ } + mptcp_pm_remove_addr(msk, &list); + spin_unlock_bh(&msk->pm.lock); + } + return ret; + } + ++static void __mark_subflow_endp_available(struct mptcp_sock *msk, u8 id) ++{ ++ /* If it was marked as used, and not ID 0, decrement local_addr_used */ ++ if (!__test_and_set_bit(id ? 
: msk->mpc_endpoint_id, msk->pm.id_avail_bitmap) && ++ id && !WARN_ON_ONCE(msk->pm.local_addr_used == 0)) ++ msk->pm.local_addr_used--; ++} ++ + static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net, + const struct mptcp_pm_addr_entry *entry) + { +@@ -1497,8 +1527,19 @@ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net, + remove_subflow = lookup_subflow_by_saddr(&msk->conn_list, addr); + mptcp_pm_remove_anno_addr(msk, addr, remove_subflow && + !(entry->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT)); +- if (remove_subflow) +- mptcp_pm_remove_subflow(msk, &list); ++ ++ if (remove_subflow) { ++ spin_lock_bh(&msk->pm.lock); ++ mptcp_pm_nl_rm_subflow_received(msk, &list); ++ spin_unlock_bh(&msk->pm.lock); ++ } ++ ++ if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) { ++ spin_lock_bh(&msk->pm.lock); ++ __mark_subflow_endp_available(msk, list.ids[0]); ++ spin_unlock_bh(&msk->pm.lock); ++ } ++ + release_sock(sk); + + next: +@@ -1533,6 +1574,7 @@ static int mptcp_nl_remove_id_zero_address(struct net *net, + spin_lock_bh(&msk->pm.lock); + mptcp_pm_remove_addr(msk, &list); + mptcp_pm_nl_rm_subflow_received(msk, &list); ++ __mark_subflow_endp_available(msk, 0); + spin_unlock_bh(&msk->pm.lock); + release_sock(sk); + +@@ -1636,14 +1678,17 @@ void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk, + alist.ids[alist.nr++] = entry->addr.id; + } + ++ spin_lock_bh(&msk->pm.lock); + if (alist.nr) { +- spin_lock_bh(&msk->pm.lock); + msk->pm.add_addr_signaled -= alist.nr; + mptcp_pm_remove_addr(msk, &alist); +- spin_unlock_bh(&msk->pm.lock); + } + if (slist.nr) +- mptcp_pm_remove_subflow(msk, &slist); ++ mptcp_pm_nl_rm_subflow_received(msk, &slist); ++ /* Reset counters: maybe some subflows have been removed before */ ++ bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1); ++ msk->pm.local_addr_used = 0; ++ spin_unlock_bh(&msk->pm.lock); + } + + static void mptcp_nl_remove_addrs_list(struct net *net, +@@ -1922,6 +1967,7 @@ static void mptcp_pm_nl_fullmesh(struct mptcp_sock *msk, + + spin_lock_bh(&msk->pm.lock); + mptcp_pm_nl_rm_subflow_received(msk, &list); ++ __mark_subflow_endp_available(msk, list.ids[0]); + mptcp_pm_create_subflow_or_signal_addr(msk); + spin_unlock_bh(&msk->pm.lock); + } +diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h +index 0201b1004a3b9..89dda24537d9e 100644 +--- a/net/mptcp/protocol.h ++++ b/net/mptcp/protocol.h +@@ -945,7 +945,6 @@ int mptcp_pm_announce_addr(struct mptcp_sock *msk, + const struct mptcp_addr_info *addr, + bool echo); + int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list); +-int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list); + void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list); + void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk, + struct list_head *rm_list); +@@ -1047,8 +1046,6 @@ static inline u8 subflow_get_local_id(const struct mptcp_subflow_context *subflo + + void __init mptcp_pm_nl_init(void); + void mptcp_pm_nl_work(struct mptcp_sock *msk); +-void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk, +- const struct mptcp_rm_list *rm_list); + unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk); + unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk); + unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk); +diff --git a/net/netfilter/nf_flow_table_inet.c b/net/netfilter/nf_flow_table_inet.c +index 6eef15648b7b0..b0f1991719324 100644 +--- 
a/net/netfilter/nf_flow_table_inet.c ++++ b/net/netfilter/nf_flow_table_inet.c +@@ -17,6 +17,9 @@ nf_flow_offload_inet_hook(void *priv, struct sk_buff *skb, + + switch (skb->protocol) { + case htons(ETH_P_8021Q): ++ if (!pskb_may_pull(skb, skb_mac_offset(skb) + sizeof(*veth))) ++ return NF_ACCEPT; ++ + veth = (struct vlan_ethhdr *)skb_mac_header(skb); + proto = veth->h_vlan_encapsulated_proto; + break; +diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c +index 5383bed3d3e00..846fa2ad7c858 100644 +--- a/net/netfilter/nf_flow_table_ip.c ++++ b/net/netfilter/nf_flow_table_ip.c +@@ -281,6 +281,9 @@ static bool nf_flow_skb_encap_protocol(struct sk_buff *skb, __be16 proto, + + switch (skb->protocol) { + case htons(ETH_P_8021Q): ++ if (!pskb_may_pull(skb, skb_mac_offset(skb) + sizeof(*veth))) ++ return false; ++ + veth = (struct vlan_ethhdr *)skb_mac_header(skb); + if (veth->h_vlan_encapsulated_proto == proto) { + *offset += VLAN_HLEN; +diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c +index a010b25076ca0..3d46372b538e5 100644 +--- a/net/netfilter/nf_flow_table_offload.c ++++ b/net/netfilter/nf_flow_table_offload.c +@@ -841,8 +841,8 @@ static int nf_flow_offload_tuple(struct nf_flowtable *flowtable, + struct list_head *block_cb_list) + { + struct flow_cls_offload cls_flow = {}; ++ struct netlink_ext_ack extack = {}; + struct flow_block_cb *block_cb; +- struct netlink_ext_ack extack; + __be16 proto = ETH_P_ALL; + int err, i = 0; + +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c +index ea139fca74cb9..fc99a5e91829d 100644 +--- a/net/netfilter/nf_tables_api.c ++++ b/net/netfilter/nf_tables_api.c +@@ -7717,28 +7717,26 @@ static void audit_log_obj_reset(const struct nft_table *table, + kfree(buf); + } + +-struct nft_obj_filter { ++struct nft_obj_dump_ctx { ++ unsigned int s_idx; + char *table; + u32 type; ++ bool reset; + }; + + static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb) + { + const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); +- const struct nft_table *table; +- unsigned int idx = 0, s_idx = cb->args[0]; +- struct nft_obj_filter *filter = cb->data; ++ struct nft_obj_dump_ctx *ctx = (void *)cb->ctx; + struct net *net = sock_net(skb->sk); + int family = nfmsg->nfgen_family; + struct nftables_pernet *nft_net; ++ const struct nft_table *table; + unsigned int entries = 0; + struct nft_object *obj; +- bool reset = false; ++ unsigned int idx = 0; + int rc = 0; + +- if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) == NFT_MSG_GETOBJ_RESET) +- reset = true; +- + rcu_read_lock(); + nft_net = nft_pernet(net); + cb->seq = READ_ONCE(nft_net->base_seq); +@@ -7751,17 +7749,12 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb) + list_for_each_entry_rcu(obj, &table->objects, list) { + if (!nft_is_active(net, obj)) + goto cont; +- if (idx < s_idx) ++ if (idx < ctx->s_idx) + goto cont; +- if (idx > s_idx) +- memset(&cb->args[1], 0, +- sizeof(cb->args) - sizeof(cb->args[0])); +- if (filter && filter->table && +- strcmp(filter->table, table->name)) ++ if (ctx->table && strcmp(ctx->table, table->name)) + goto cont; +- if (filter && +- filter->type != NFT_OBJECT_UNSPEC && +- obj->ops->type->type != filter->type) ++ if (ctx->type != NFT_OBJECT_UNSPEC && ++ obj->ops->type->type != ctx->type) + goto cont; + + rc = nf_tables_fill_obj_info(skb, net, +@@ -7770,7 +7763,7 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb) + NFT_MSG_NEWOBJ, + 
NLM_F_MULTI | NLM_F_APPEND, + table->family, table, +- obj, reset); ++ obj, ctx->reset); + if (rc < 0) + break; + +@@ -7779,58 +7772,71 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb) + cont: + idx++; + } +- if (reset && entries) ++ if (ctx->reset && entries) + audit_log_obj_reset(table, nft_net->base_seq, entries); + if (rc < 0) + break; + } + rcu_read_unlock(); + +- cb->args[0] = idx; ++ ctx->s_idx = idx; + return skb->len; + } + ++static int nf_tables_dumpreset_obj(struct sk_buff *skb, ++ struct netlink_callback *cb) ++{ ++ struct nftables_pernet *nft_net = nft_pernet(sock_net(skb->sk)); ++ int ret; ++ ++ mutex_lock(&nft_net->commit_mutex); ++ ret = nf_tables_dump_obj(skb, cb); ++ mutex_unlock(&nft_net->commit_mutex); ++ ++ return ret; ++} ++ + static int nf_tables_dump_obj_start(struct netlink_callback *cb) + { ++ struct nft_obj_dump_ctx *ctx = (void *)cb->ctx; + const struct nlattr * const *nla = cb->data; +- struct nft_obj_filter *filter = NULL; + +- if (nla[NFTA_OBJ_TABLE] || nla[NFTA_OBJ_TYPE]) { +- filter = kzalloc(sizeof(*filter), GFP_ATOMIC); +- if (!filter) +- return -ENOMEM; +- +- if (nla[NFTA_OBJ_TABLE]) { +- filter->table = nla_strdup(nla[NFTA_OBJ_TABLE], GFP_ATOMIC); +- if (!filter->table) { +- kfree(filter); +- return -ENOMEM; +- } +- } ++ BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx)); + +- if (nla[NFTA_OBJ_TYPE]) +- filter->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE])); ++ if (nla[NFTA_OBJ_TABLE]) { ++ ctx->table = nla_strdup(nla[NFTA_OBJ_TABLE], GFP_ATOMIC); ++ if (!ctx->table) ++ return -ENOMEM; + } + +- cb->data = filter; ++ if (nla[NFTA_OBJ_TYPE]) ++ ctx->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE])); ++ + return 0; + } + ++static int nf_tables_dumpreset_obj_start(struct netlink_callback *cb) ++{ ++ struct nft_obj_dump_ctx *ctx = (void *)cb->ctx; ++ ++ ctx->reset = true; ++ ++ return nf_tables_dump_obj_start(cb); ++} ++ + static int nf_tables_dump_obj_done(struct netlink_callback *cb) + { +- struct nft_obj_filter *filter = cb->data; ++ struct nft_obj_dump_ctx *ctx = (void *)cb->ctx; + +- if (filter) { +- kfree(filter->table); +- kfree(filter); +- } ++ kfree(ctx->table); + + return 0; + } + + /* called with rcu_read_lock held */ +-static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info, +- const struct nlattr * const nla[]) ++static struct sk_buff * ++nf_tables_getobj_single(u32 portid, const struct nfnl_info *info, ++ const struct nlattr * const nla[], bool reset) + { + struct netlink_ext_ack *extack = info->extack; + u8 genmask = nft_genmask_cur(info->net); +@@ -7839,72 +7845,109 @@ static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info, + struct net *net = info->net; + struct nft_object *obj; + struct sk_buff *skb2; +- bool reset = false; + u32 objtype; + int err; + +- if (info->nlh->nlmsg_flags & NLM_F_DUMP) { +- struct netlink_dump_control c = { +- .start = nf_tables_dump_obj_start, +- .dump = nf_tables_dump_obj, +- .done = nf_tables_dump_obj_done, +- .module = THIS_MODULE, +- .data = (void *)nla, +- }; +- +- return nft_netlink_dump_start_rcu(info->sk, skb, info->nlh, &c); +- } +- + if (!nla[NFTA_OBJ_NAME] || + !nla[NFTA_OBJ_TYPE]) +- return -EINVAL; ++ return ERR_PTR(-EINVAL); + + table = nft_table_lookup(net, nla[NFTA_OBJ_TABLE], family, genmask, 0); + if (IS_ERR(table)) { + NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_TABLE]); +- return PTR_ERR(table); ++ return ERR_CAST(table); + } + + objtype = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE])); + obj = nft_obj_lookup(net, table, 
nla[NFTA_OBJ_NAME], objtype, genmask); + if (IS_ERR(obj)) { + NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_NAME]); +- return PTR_ERR(obj); ++ return ERR_CAST(obj); + } + + skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC); + if (!skb2) +- return -ENOMEM; ++ return ERR_PTR(-ENOMEM); + +- if (NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_GETOBJ_RESET) +- reset = true; ++ err = nf_tables_fill_obj_info(skb2, net, portid, ++ info->nlh->nlmsg_seq, NFT_MSG_NEWOBJ, 0, ++ family, table, obj, reset); ++ if (err < 0) { ++ kfree_skb(skb2); ++ return ERR_PTR(err); ++ } + +- if (reset) { +- const struct nftables_pernet *nft_net; +- char *buf; ++ return skb2; ++} + +- nft_net = nft_pernet(net); +- buf = kasprintf(GFP_ATOMIC, "%s:%u", table->name, nft_net->base_seq); ++static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info, ++ const struct nlattr * const nla[]) ++{ ++ u32 portid = NETLINK_CB(skb).portid; ++ struct sk_buff *skb2; ++ ++ if (info->nlh->nlmsg_flags & NLM_F_DUMP) { ++ struct netlink_dump_control c = { ++ .start = nf_tables_dump_obj_start, ++ .dump = nf_tables_dump_obj, ++ .done = nf_tables_dump_obj_done, ++ .module = THIS_MODULE, ++ .data = (void *)nla, ++ }; + +- audit_log_nfcfg(buf, +- family, +- 1, +- AUDIT_NFT_OP_OBJ_RESET, +- GFP_ATOMIC); +- kfree(buf); ++ return nft_netlink_dump_start_rcu(info->sk, skb, info->nlh, &c); + } + +- err = nf_tables_fill_obj_info(skb2, net, NETLINK_CB(skb).portid, +- info->nlh->nlmsg_seq, NFT_MSG_NEWOBJ, 0, +- family, table, obj, reset); +- if (err < 0) +- goto err_fill_obj_info; ++ skb2 = nf_tables_getobj_single(portid, info, nla, false); ++ if (IS_ERR(skb2)) ++ return PTR_ERR(skb2); + +- return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); ++ return nfnetlink_unicast(skb2, info->net, portid); ++} + +-err_fill_obj_info: +- kfree_skb(skb2); +- return err; ++static int nf_tables_getobj_reset(struct sk_buff *skb, ++ const struct nfnl_info *info, ++ const struct nlattr * const nla[]) ++{ ++ struct nftables_pernet *nft_net = nft_pernet(info->net); ++ u32 portid = NETLINK_CB(skb).portid; ++ struct net *net = info->net; ++ struct sk_buff *skb2; ++ char *buf; ++ ++ if (info->nlh->nlmsg_flags & NLM_F_DUMP) { ++ struct netlink_dump_control c = { ++ .start = nf_tables_dumpreset_obj_start, ++ .dump = nf_tables_dumpreset_obj, ++ .done = nf_tables_dump_obj_done, ++ .module = THIS_MODULE, ++ .data = (void *)nla, ++ }; ++ ++ return nft_netlink_dump_start_rcu(info->sk, skb, info->nlh, &c); ++ } ++ ++ if (!try_module_get(THIS_MODULE)) ++ return -EINVAL; ++ rcu_read_unlock(); ++ mutex_lock(&nft_net->commit_mutex); ++ skb2 = nf_tables_getobj_single(portid, info, nla, true); ++ mutex_unlock(&nft_net->commit_mutex); ++ rcu_read_lock(); ++ module_put(THIS_MODULE); ++ ++ if (IS_ERR(skb2)) ++ return PTR_ERR(skb2); ++ ++ buf = kasprintf(GFP_ATOMIC, "%.*s:%u", ++ nla_len(nla[NFTA_OBJ_TABLE]), ++ (char *)nla_data(nla[NFTA_OBJ_TABLE]), ++ nft_net->base_seq); ++ audit_log_nfcfg(buf, info->nfmsg->nfgen_family, 1, ++ AUDIT_NFT_OP_OBJ_RESET, GFP_ATOMIC); ++ kfree(buf); ++ ++ return nfnetlink_unicast(skb2, net, portid); + } + + static void nft_obj_destroy(const struct nft_ctx *ctx, struct nft_object *obj) +@@ -9182,7 +9225,7 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = { + .policy = nft_obj_policy, + }, + [NFT_MSG_GETOBJ_RESET] = { +- .call = nf_tables_getobj, ++ .call = nf_tables_getobj_reset, + .type = NFNL_CB_RCU, + .attr_count = NFTA_OBJ_MAX, + .policy = nft_obj_policy, +diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c 
+index dfc856b3e1fa4..09209b4952ad1 100644 +--- a/net/netfilter/nfnetlink_queue.c ++++ b/net/netfilter/nfnetlink_queue.c +@@ -668,10 +668,41 @@ static bool nf_ct_drop_unconfirmed(const struct nf_queue_entry *entry) + { + #if IS_ENABLED(CONFIG_NF_CONNTRACK) + static const unsigned long flags = IPS_CONFIRMED | IPS_DYING; +- const struct nf_conn *ct = (void *)skb_nfct(entry->skb); ++ struct nf_conn *ct = (void *)skb_nfct(entry->skb); ++ unsigned long status; ++ unsigned int use; + +- if (ct && ((ct->status & flags) == IPS_DYING)) ++ if (!ct) ++ return false; ++ ++ status = READ_ONCE(ct->status); ++ if ((status & flags) == IPS_DYING) + return true; ++ ++ if (status & IPS_CONFIRMED) ++ return false; ++ ++ /* in some cases skb_clone() can occur after initial conntrack ++ * pickup, but conntrack assumes exclusive skb->_nfct ownership for ++ * unconfirmed entries. ++ * ++ * This happens for br_netfilter and with ip multicast routing. ++ * We can't be solved with serialization here because one clone could ++ * have been queued for local delivery. ++ */ ++ use = refcount_read(&ct->ct_general.use); ++ if (likely(use == 1)) ++ return false; ++ ++ /* Can't decrement further? Exclusive ownership. */ ++ if (!refcount_dec_not_one(&ct->ct_general.use)) ++ return false; ++ ++ skb_set_nfct(entry->skb, 0); ++ /* No nf_ct_put(): we already decremented .use and it cannot ++ * drop down to 0. ++ */ ++ return true; + #endif + return false; + } +diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c +index dccc68a5135ad..b7aa4d2c8c22f 100644 +--- a/net/netfilter/nft_counter.c ++++ b/net/netfilter/nft_counter.c +@@ -107,11 +107,16 @@ static void nft_counter_reset(struct nft_counter_percpu_priv *priv, + struct nft_counter *total) + { + struct nft_counter *this_cpu; ++ seqcount_t *myseq; + + local_bh_disable(); + this_cpu = this_cpu_ptr(priv->counter); ++ myseq = this_cpu_ptr(&nft_counter_seq); ++ ++ write_seqcount_begin(myseq); + this_cpu->packets -= total->packets; + this_cpu->bytes -= total->bytes; ++ write_seqcount_end(myseq); + local_bh_enable(); + } + +@@ -265,7 +270,7 @@ static void nft_counter_offload_stats(struct nft_expr *expr, + struct nft_counter *this_cpu; + seqcount_t *myseq; + +- preempt_disable(); ++ local_bh_disable(); + this_cpu = this_cpu_ptr(priv->counter); + myseq = this_cpu_ptr(&nft_counter_seq); + +@@ -273,7 +278,7 @@ static void nft_counter_offload_stats(struct nft_expr *expr, + this_cpu->packets += stats->pkts; + this_cpu->bytes += stats->bytes; + write_seqcount_end(myseq); +- preempt_enable(); ++ local_bh_enable(); + } + + void nft_counter_init_seqcount(void) +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c +index 6ae782efb1ee3..db49fc4d42cf7 100644 +--- a/net/netlink/af_netlink.c ++++ b/net/netlink/af_netlink.c +@@ -130,7 +130,7 @@ static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = { + "nlk_cb_mutex-MAX_LINKS" + }; + +-static int netlink_dump(struct sock *sk); ++static int netlink_dump(struct sock *sk, bool lock_taken); + + /* nl_table locking explained: + * Lookup and traversal are protected with an RCU read-side lock. 
Insertion +@@ -1989,7 +1989,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + + if (READ_ONCE(nlk->cb_running) && + atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) { +- ret = netlink_dump(sk); ++ ret = netlink_dump(sk, false); + if (ret) { + WRITE_ONCE(sk->sk_err, -ret); + sk_error_report(sk); +@@ -2198,7 +2198,7 @@ static int netlink_dump_done(struct netlink_sock *nlk, struct sk_buff *skb, + return 0; + } + +-static int netlink_dump(struct sock *sk) ++static int netlink_dump(struct sock *sk, bool lock_taken) + { + struct netlink_sock *nlk = nlk_sk(sk); + struct netlink_ext_ack extack = {}; +@@ -2210,7 +2210,8 @@ static int netlink_dump(struct sock *sk) + int alloc_min_size; + int alloc_size; + +- mutex_lock(nlk->cb_mutex); ++ if (!lock_taken) ++ mutex_lock(nlk->cb_mutex); + if (!nlk->cb_running) { + err = -EINVAL; + goto errout_skb; +@@ -2367,9 +2368,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, + WRITE_ONCE(nlk->cb_running, true); + nlk->dump_done_errno = INT_MAX; + +- mutex_unlock(nlk->cb_mutex); +- +- ret = netlink_dump(sk); ++ ret = netlink_dump(sk, true); + + sock_put(sk); + +diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c +index 11c69415c6052..b7232142c13f8 100644 +--- a/net/openvswitch/datapath.c ++++ b/net/openvswitch/datapath.c +@@ -2707,7 +2707,7 @@ static struct pernet_operations ovs_net_ops = { + }; + + static const char * const ovs_drop_reasons[] = { +-#define S(x) (#x), ++#define S(x) [(x) & ~SKB_DROP_REASON_SUBSYS_MASK] = (#x), + OVS_DROP_REASONS(S) + #undef S + }; +diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c +index 6b32d61d4cdc4..ad6c57a9f27c7 100644 +--- a/net/rxrpc/rxkad.c ++++ b/net/rxrpc/rxkad.c +@@ -259,7 +259,7 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call, + + _enter(""); + +- check = txb->seq ^ ntohl(txb->wire.callNumber); ++ check = txb->seq ^ call->call_id; + hdr->data_size = htonl((u32)check << 16 | txb->len); + + txb->len += sizeof(struct rxkad_level1_hdr); +@@ -302,7 +302,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call, + + _enter(""); + +- check = txb->seq ^ ntohl(txb->wire.callNumber); ++ check = txb->seq ^ call->call_id; + + rxkhdr->data_size = htonl(txb->len | (u32)check << 16); + rxkhdr->checksum = 0; +@@ -362,9 +362,9 @@ static int rxkad_secure_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb) + memcpy(&iv, call->conn->rxkad.csum_iv.x, sizeof(iv)); + + /* calculate the security checksum */ +- x = (ntohl(txb->wire.cid) & RXRPC_CHANNELMASK) << (32 - RXRPC_CIDSHIFT); ++ x = (call->cid & RXRPC_CHANNELMASK) << (32 - RXRPC_CIDSHIFT); + x |= txb->seq & 0x3fffffff; +- crypto.buf[0] = txb->wire.callNumber; ++ crypto.buf[0] = htonl(call->call_id); + crypto.buf[1] = htonl(x); + + sg_init_one(&sg, crypto.buf, 8); +diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c +index 4ad39a4a3cf5b..0224a92492453 100644 +--- a/net/sched/sch_netem.c ++++ b/net/sched/sch_netem.c +@@ -446,12 +446,10 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct netem_sched_data *q = qdisc_priv(sch); + /* We don't fill cb now as skb_unshare() may invalidate it */ + struct netem_skb_cb *cb; +- struct sk_buff *skb2; ++ struct sk_buff *skb2 = NULL; + struct sk_buff *segs = NULL; + unsigned int prev_len = qdisc_pkt_len(skb); + int count = 1; +- int rc = NET_XMIT_SUCCESS; +- int rc_drop = NET_XMIT_DROP; + + /* Do not fool qdisc_drop_all() */ + skb->prev = NULL; +@@ -480,19 +478,11 @@ static int netem_enqueue(struct 
sk_buff *skb, struct Qdisc *sch, + skb_orphan_partial(skb); + + /* +- * If we need to duplicate packet, then re-insert at top of the +- * qdisc tree, since parent queuer expects that only one +- * skb will be queued. ++ * If we need to duplicate packet, then clone it before ++ * original is modified. + */ +- if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) { +- struct Qdisc *rootq = qdisc_root_bh(sch); +- u32 dupsave = q->duplicate; /* prevent duplicating a dup... */ +- +- q->duplicate = 0; +- rootq->enqueue(skb2, rootq, to_free); +- q->duplicate = dupsave; +- rc_drop = NET_XMIT_SUCCESS; +- } ++ if (count > 1) ++ skb2 = skb_clone(skb, GFP_ATOMIC); + + /* + * Randomized packet corruption. +@@ -504,7 +494,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, + if (skb_is_gso(skb)) { + skb = netem_segment(skb, sch, to_free); + if (!skb) +- return rc_drop; ++ goto finish_segs; ++ + segs = skb->next; + skb_mark_not_on_list(skb); + qdisc_skb_cb(skb)->pkt_len = skb->len; +@@ -530,7 +521,24 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, + /* re-link segs, so that qdisc_drop_all() frees them all */ + skb->next = segs; + qdisc_drop_all(skb, sch, to_free); +- return rc_drop; ++ if (skb2) ++ __qdisc_drop(skb2, to_free); ++ return NET_XMIT_DROP; ++ } ++ ++ /* ++ * If doing duplication then re-insert at top of the ++ * qdisc tree, since parent queuer expects that only one ++ * skb will be queued. ++ */ ++ if (skb2) { ++ struct Qdisc *rootq = qdisc_root_bh(sch); ++ u32 dupsave = q->duplicate; /* prevent duplicating a dup... */ ++ ++ q->duplicate = 0; ++ rootq->enqueue(skb2, rootq, to_free); ++ q->duplicate = dupsave; ++ skb2 = NULL; + } + + qdisc_qstats_backlog_inc(sch, skb); +@@ -601,9 +609,12 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, + } + + finish_segs: ++ if (skb2) ++ __qdisc_drop(skb2, to_free); ++ + if (segs) { + unsigned int len, last_len; +- int nb; ++ int rc, nb; + + len = skb ? skb->len : 0; + nb = skb ? 
1 : 0; +diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c +index 4afb6a541cf38..f5eb737a677d9 100644 +--- a/net/vmw_vsock/af_vsock.c ++++ b/net/vmw_vsock/af_vsock.c +@@ -1270,25 +1270,28 @@ static int vsock_dgram_connect(struct socket *sock, + return err; + } + ++int __vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg, ++ size_t len, int flags) ++{ ++ struct sock *sk = sock->sk; ++ struct vsock_sock *vsk = vsock_sk(sk); ++ ++ return vsk->transport->dgram_dequeue(vsk, msg, len, flags); ++} ++ + int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg, + size_t len, int flags) + { + #ifdef CONFIG_BPF_SYSCALL ++ struct sock *sk = sock->sk; + const struct proto *prot; +-#endif +- struct vsock_sock *vsk; +- struct sock *sk; + +- sk = sock->sk; +- vsk = vsock_sk(sk); +- +-#ifdef CONFIG_BPF_SYSCALL + prot = READ_ONCE(sk->sk_prot); + if (prot != &vsock_proto) + return prot->recvmsg(sk, msg, len, flags, NULL); + #endif + +- return vsk->transport->dgram_dequeue(vsk, msg, len, flags); ++ return __vsock_dgram_recvmsg(sock, msg, len, flags); + } + EXPORT_SYMBOL_GPL(vsock_dgram_recvmsg); + +@@ -2124,15 +2127,12 @@ static int __vsock_seqpacket_recvmsg(struct sock *sk, struct msghdr *msg, + } + + int +-vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, +- int flags) ++__vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, ++ int flags) + { + struct sock *sk; + struct vsock_sock *vsk; + const struct vsock_transport *transport; +-#ifdef CONFIG_BPF_SYSCALL +- const struct proto *prot; +-#endif + int err; + + sk = sock->sk; +@@ -2183,14 +2183,6 @@ vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + goto out; + } + +-#ifdef CONFIG_BPF_SYSCALL +- prot = READ_ONCE(sk->sk_prot); +- if (prot != &vsock_proto) { +- release_sock(sk); +- return prot->recvmsg(sk, msg, len, flags, NULL); +- } +-#endif +- + if (sk->sk_type == SOCK_STREAM) + err = __vsock_stream_recvmsg(sk, msg, len, flags); + else +@@ -2200,6 +2192,22 @@ vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + release_sock(sk); + return err; + } ++ ++int ++vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, ++ int flags) ++{ ++#ifdef CONFIG_BPF_SYSCALL ++ struct sock *sk = sock->sk; ++ const struct proto *prot; ++ ++ prot = READ_ONCE(sk->sk_prot); ++ if (prot != &vsock_proto) ++ return prot->recvmsg(sk, msg, len, flags, NULL); ++#endif ++ ++ return __vsock_connectible_recvmsg(sock, msg, len, flags); ++} + EXPORT_SYMBOL_GPL(vsock_connectible_recvmsg); + + static int vsock_set_rcvlowat(struct sock *sk, int val) +diff --git a/net/vmw_vsock/vsock_bpf.c b/net/vmw_vsock/vsock_bpf.c +index a3c97546ab84a..c42c5cc18f324 100644 +--- a/net/vmw_vsock/vsock_bpf.c ++++ b/net/vmw_vsock/vsock_bpf.c +@@ -64,9 +64,9 @@ static int __vsock_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int + int err; + + if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) +- err = vsock_connectible_recvmsg(sock, msg, len, flags); ++ err = __vsock_connectible_recvmsg(sock, msg, len, flags); + else if (sk->sk_type == SOCK_DGRAM) +- err = vsock_dgram_recvmsg(sock, msg, len, flags); ++ err = __vsock_dgram_recvmsg(sock, msg, len, flags); + else + err = -EPROTOTYPE; + +diff --git a/net/wireless/core.h b/net/wireless/core.h +index f0a3a23176385..c955be6c6daa4 100644 +--- a/net/wireless/core.h ++++ b/net/wireless/core.h +@@ -228,6 +228,7 @@ void cfg80211_register_wdev(struct cfg80211_registered_device *rdev, + 
static inline void wdev_lock(struct wireless_dev *wdev) + __acquires(wdev) + { ++ lockdep_assert_held(&wdev->wiphy->mtx); + mutex_lock(&wdev->mtx); + __acquire(wdev->mtx); + } +@@ -235,11 +236,16 @@ static inline void wdev_lock(struct wireless_dev *wdev) + static inline void wdev_unlock(struct wireless_dev *wdev) + __releases(wdev) + { ++ lockdep_assert_held(&wdev->wiphy->mtx); + __release(wdev->mtx); + mutex_unlock(&wdev->mtx); + } + +-#define ASSERT_WDEV_LOCK(wdev) lockdep_assert_held(&(wdev)->mtx) ++static inline void ASSERT_WDEV_LOCK(struct wireless_dev *wdev) ++{ ++ lockdep_assert_held(&wdev->wiphy->mtx); ++ lockdep_assert_held(&wdev->mtx); ++} + + static inline bool cfg80211_has_monitors_only(struct cfg80211_registered_device *rdev) + { +diff --git a/scripts/rust_is_available.sh b/scripts/rust_is_available.sh +index 117018946b577..a6fdcf13e0e53 100755 +--- a/scripts/rust_is_available.sh ++++ b/scripts/rust_is_available.sh +@@ -129,8 +129,12 @@ fi + # Check that the Rust bindings generator is suitable. + # + # Non-stable and distributions' versions may have a version suffix, e.g. `-dev`. ++# ++# The dummy parameter `workaround-for-0.69.0` is required to support 0.69.0 ++# (https://github.com/rust-lang/rust-bindgen/pull/2678). It can be removed when ++# the minimum version is upgraded past that (0.69.1 already fixed the issue). + rust_bindings_generator_output=$( \ +- LC_ALL=C "$BINDGEN" --version 2>/dev/null ++ LC_ALL=C "$BINDGEN" --version workaround-for-0.69.0 2>/dev/null + ) || rust_bindings_generator_code=$? + if [ -n "$rust_bindings_generator_code" ]; then + echo >&2 "***" +diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c +index ff9a939dad8e4..2393230c03aa3 100644 +--- a/security/integrity/evm/evm_main.c ++++ b/security/integrity/evm/evm_main.c +@@ -864,6 +864,13 @@ void evm_inode_post_setattr(struct dentry *dentry, int ia_valid) + evm_update_evmxattr(dentry, NULL, NULL, 0); + } + ++int evm_inode_copy_up_xattr(const char *name) ++{ ++ if (strcmp(name, XATTR_NAME_EVM) == 0) ++ return 1; /* Discard */ ++ return -EOPNOTSUPP; ++} ++ + /* + * evm_inode_init_security - initializes security.evm HMAC value + */ +diff --git a/security/security.c b/security/security.c +index dd26f21b2244b..b6144833c7a8e 100644 +--- a/security/security.c ++++ b/security/security.c +@@ -2539,7 +2539,7 @@ int security_inode_copy_up_xattr(const char *name) + return rc; + } + +- return LSM_RET_DEFAULT(inode_copy_up_xattr); ++ return evm_inode_copy_up_xattr(name); + } + EXPORT_SYMBOL(security_inode_copy_up_xattr); + +diff --git a/security/selinux/avc.c b/security/selinux/avc.c +index 32eb67fb3e42c..b49c44869dc46 100644 +--- a/security/selinux/avc.c ++++ b/security/selinux/avc.c +@@ -330,12 +330,12 @@ static int avc_add_xperms_decision(struct avc_node *node, + { + struct avc_xperms_decision_node *dest_xpd; + +- node->ae.xp_node->xp.len++; + dest_xpd = avc_xperms_decision_alloc(src->used); + if (!dest_xpd) + return -ENOMEM; + avc_copy_xperms_decision(&dest_xpd->xpd, src); + list_add(&dest_xpd->xpd_list, &node->ae.xp_node->xpd_head); ++ node->ae.xp_node->xp.len++; + return 0; + } + +@@ -907,7 +907,11 @@ static int avc_update_node(u32 event, u32 perms, u8 driver, u8 xperm, u32 ssid, + node->ae.avd.auditdeny &= ~perms; + break; + case AVC_CALLBACK_ADD_XPERMS: +- avc_add_xperms_decision(node, xpd); ++ rc = avc_add_xperms_decision(node, xpd); ++ if (rc) { ++ avc_node_kill(node); ++ goto out_unlock; ++ } + break; + } + avc_node_replace(node, orig); +diff --git a/security/selinux/hooks.c 
b/security/selinux/hooks.c +index 53cfeefb2f194..d32d16d75795a 100644 +--- a/security/selinux/hooks.c ++++ b/security/selinux/hooks.c +@@ -3835,7 +3835,17 @@ static int selinux_file_mprotect(struct vm_area_struct *vma, + if (default_noexec && + (prot & PROT_EXEC) && !(vma->vm_flags & VM_EXEC)) { + int rc = 0; +- if (vma_is_initial_heap(vma)) { ++ /* ++ * We don't use the vma_is_initial_heap() helper as it has ++ * a history of problems and is currently broken on systems ++ * where there is no heap, e.g. brk == start_brk. Before ++ * replacing the conditional below with vma_is_initial_heap(), ++ * or something similar, please ensure that the logic is the ++ * same as what we have below or you have tested every possible ++ * corner case you can think to test. ++ */ ++ if (vma->vm_start >= vma->vm_mm->start_brk && ++ vma->vm_end <= vma->vm_mm->brk) { + rc = avc_has_perm(sid, sid, SECCLASS_PROCESS, + PROCESS__EXECHEAP, NULL); + } else if (!vma->vm_file && (vma_is_initial_stack(vma) || +diff --git a/sound/core/timer.c b/sound/core/timer.c +index a0b515981ee9b..230babace502d 100644 +--- a/sound/core/timer.c ++++ b/sound/core/timer.c +@@ -556,7 +556,7 @@ static int snd_timer_start1(struct snd_timer_instance *timeri, + /* check the actual time for the start tick; + * bail out as error if it's way too low (< 100us) + */ +- if (start) { ++ if (start && !(timer->hw.flags & SNDRV_TIMER_HW_SLAVE)) { + if ((u64)snd_timer_hw_resolution(timer) * ticks < 100000) { + result = -EINVAL; + goto unlock; +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 82dcea2b78000..5736516275a34 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -585,7 +585,6 @@ static void alc_shutup_pins(struct hda_codec *codec) + switch (codec->core.vendor_id) { + case 0x10ec0236: + case 0x10ec0256: +- case 0x10ec0257: + case 0x19e58326: + case 0x10ec0283: + case 0x10ec0285: +diff --git a/sound/pci/hda/tas2781_hda_i2c.c b/sound/pci/hda/tas2781_hda_i2c.c +index 75cc3676c1b92..e5bb1fed26a0c 100644 +--- a/sound/pci/hda/tas2781_hda_i2c.c ++++ b/sound/pci/hda/tas2781_hda_i2c.c +@@ -2,10 +2,12 @@ + // + // TAS2781 HDA I2C driver + // +-// Copyright 2023 Texas Instruments, Inc. ++// Copyright 2023 - 2024 Texas Instruments, Inc. 
+ //
+ // Author: Shenghao Ding <shenghao-ding@ti.com>
++// Current maintainer: Baojun Xu <baojun.xu@ti.com>
+
++#include <asm/unaligned.h>
+ #include <linux/acpi.h>
+ #include <linux/crc8.h>
+ #include <linux/crc32.h>
+@@ -425,20 +427,22 @@ static void tas2781_apply_calib(struct tasdevice_priv *tas_priv)
+ static const unsigned char rgno_array[CALIB_MAX] = {
+ 0x74, 0x0c, 0x14, 0x70, 0x7c,
+ };
+- unsigned char *data;
++ int offset = 0;
+ int i, j, rc;
++ __be32 data;
+ 
+ for (i = 0; i < tas_priv->ndev; i++) {
+- data = tas_priv->cali_data.data +
+- i * TASDEVICE_SPEAKER_CALIBRATION_SIZE;
+ for (j = 0; j < CALIB_MAX; j++) {
++ data = cpu_to_be32(
++ *(uint32_t *)&tas_priv->cali_data.data[offset]);
+ rc = tasdevice_dev_bulk_write(tas_priv, i,
+ TASDEVICE_REG(0, page_array[j], rgno_array[j]),
+- &(data[4 * j]), 4);
++ (unsigned char *)&data, 4);
+ if (rc < 0)
+ dev_err(tas_priv->dev,
+ "chn %d calib %d bulk_wr err = %d\n",
+ i, j, rc);
++ offset += 4;
+ }
+ }
+ }
+diff --git a/sound/soc/codecs/cs35l45.c b/sound/soc/codecs/cs35l45.c
+index 9b9fc2d491089..7e439c778c6b4 100644
+--- a/sound/soc/codecs/cs35l45.c
++++ b/sound/soc/codecs/cs35l45.c
+@@ -1067,7 +1067,10 @@ static irqreturn_t cs35l45_spk_safe_err(int irq, void *data)
+ 
+ i = irq - regmap_irq_get_virq(cs35l45->irq_data, 0);
+ 
+- dev_err(cs35l45->dev, "%s condition detected!\n", cs35l45_irqs[i].name);
++ if (i < 0 || i >= ARRAY_SIZE(cs35l45_irqs))
++ dev_err(cs35l45->dev, "Unspecified global error condition (%d) detected!\n", irq);
++ else
++ dev_err(cs35l45->dev, "%s condition detected!\n", cs35l45_irqs[i].name);
+ 
+ return IRQ_HANDLED;
+ }
+diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c
+index e80a2a5ec56a1..1506982a56c30 100644
+--- a/sound/soc/sof/intel/hda-dsp.c
++++ b/sound/soc/sof/intel/hda-dsp.c
+@@ -709,6 +709,9 @@ static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
+ if (ret < 0)
+ return ret;
+ 
++ /* make sure that no irq handler is pending before shutdown */
++ synchronize_irq(sdev->ipc_irq);
++
+ hda_codec_jack_wake_enable(sdev, runtime_suspend);
+ 
+ /* power down all hda links */
+diff --git a/sound/soc/sof/ipc4.c b/sound/soc/sof/ipc4.c
+index 1b09496733fb8..81d1ce4b5f0cd 100644
+--- a/sound/soc/sof/ipc4.c
++++ b/sound/soc/sof/ipc4.c
+@@ -629,7 +629,14 @@ static void sof_ipc4_rx_msg(struct snd_sof_dev *sdev)
+ return;
+ 
+ ipc4_msg->data_size = data_size;
+- snd_sof_ipc_msg_data(sdev, NULL, ipc4_msg->data_ptr, ipc4_msg->data_size);
++ err = snd_sof_ipc_msg_data(sdev, NULL, ipc4_msg->data_ptr, ipc4_msg->data_size);
++ if (err < 0) {
++ dev_err(sdev->dev, "failed to read IPC notification data: %d\n", err);
++ kfree(ipc4_msg->data_ptr);
++ ipc4_msg->data_ptr = NULL;
++ ipc4_msg->data_size = 0;
++ return;
++ }
+ }
+ 
+ sof_ipc4_log_header(sdev->dev, "ipc rx done ", ipc4_msg, true);
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index af1b8cf5a9883..d2aa97a5c438c 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -273,6 +273,7 @@ YAMAHA_DEVICE(0x105a, NULL),
+ YAMAHA_DEVICE(0x105b, NULL),
+ YAMAHA_DEVICE(0x105c, NULL),
+ YAMAHA_DEVICE(0x105d, NULL),
++YAMAHA_DEVICE(0x1718, "P-125"),
+ {
+ USB_DEVICE(0x0499, 0x1503),
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index b437b14d838ac..9ba985c6a4083 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2181,6 +2181,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+ DEVICE_FLG(0x2b53, 0x0031, /* Fiero SC-01 (firmware v1.1.0) */
+ 
QUIRK_FLAG_GENERIC_IMPLICIT_FB),
++ DEVICE_FLG(0x2d95, 0x8021, /* VIVO USB-C-XE710 HEADSET */
++ QUIRK_FLAG_CTL_MSG_DELAY_1M),
+ DEVICE_FLG(0x30be, 0x0101, /* Schiit Hel */
+ QUIRK_FLAG_IGNORE_CTL_ERROR),
+ DEVICE_FLG(0x413c, 0xa506, /* Dell AE515 sound bar */
+diff --git a/tools/include/linux/align.h b/tools/include/linux/align.h
+new file mode 100644
+index 0000000000000..14e34ace80dda
+--- /dev/null
++++ b/tools/include/linux/align.h
+@@ -0,0 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++
++#ifndef _TOOLS_LINUX_ALIGN_H
++#define _TOOLS_LINUX_ALIGN_H
++
++#include <linux/const.h>
++
++#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
++#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
++#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
++
++#endif /* _TOOLS_LINUX_ALIGN_H */
+diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h
+index f3566ea0f932e..210c13b1b8570 100644
+--- a/tools/include/linux/bitmap.h
++++ b/tools/include/linux/bitmap.h
+@@ -3,6 +3,7 @@
+ #define _TOOLS_LINUX_BITMAP_H
+ 
+ #include <string.h>
++#include <linux/align.h>
+ #include <linux/bitops.h>
+ #include <linux/find.h>
+ #include <stdlib.h>
+@@ -25,13 +26,14 @@ bool __bitmap_intersects(const unsigned long *bitmap1,
+ #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
+ #define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
+ 
++#define bitmap_size(nbits) (ALIGN(nbits, BITS_PER_LONG) / BITS_PER_BYTE)
++
+ static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
+ {
+ if (small_const_nbits(nbits))
+ *dst = 0UL;
+ else {
+- int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+- memset(dst, 0, len);
++ memset(dst, 0, bitmap_size(nbits));
+ }
+ }
+ 
+@@ -83,7 +85,7 @@ static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
+ */
+ static inline unsigned long *bitmap_zalloc(int nbits)
+ {
+- return calloc(1, BITS_TO_LONGS(nbits) * sizeof(unsigned long));
++ return calloc(1, bitmap_size(nbits));
+ }
+ 
+ /*
+@@ -126,7 +128,6 @@ static inline bool bitmap_and(unsigned long *dst, const unsigned long *src1,
+ #define BITMAP_MEM_ALIGNMENT (8 * sizeof(unsigned long))
+ #endif
+ #define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1)
+-#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
+ 
+ static inline bool bitmap_equal(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+diff --git a/tools/include/linux/mm.h b/tools/include/linux/mm.h
+index 7d73da0980473..dc0fc7125bc31 100644
+--- a/tools/include/linux/mm.h
++++ b/tools/include/linux/mm.h
+@@ -2,8 +2,8 @@
+ #ifndef _TOOLS_LINUX_MM_H
+ #define _TOOLS_LINUX_MM_H
+ 
++#include <linux/align.h>
+ #include <linux/mm_types.h>
+-#include <uapi/linux/const.h>
+ 
+ #define PAGE_SHIFT 12
+ #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+@@ -11,9 +11,6 @@
+ 
+ #define PHYS_ADDR_MAX (~(phys_addr_t)0)
+ 
+-#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
+-#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
+-
+ #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
+ 
+ #define __va(x) ((void *)((unsigned long)(x)))
+diff --git a/tools/testing/selftests/bpf/progs/cpumask_failure.c b/tools/testing/selftests/bpf/progs/cpumask_failure.c
+index a9bf6ea336cf6..a988d2823b528 100644
+--- a/tools/testing/selftests/bpf/progs/cpumask_failure.c
++++ b/tools/testing/selftests/bpf/progs/cpumask_failure.c
+@@ -61,11 +61,8 @@ SEC("tp_btf/task_newtask")
+ __failure __msg("bpf_cpumask_set_cpu args#1 expected pointer to STRUCT bpf_cpumask")
+ int BPF_PROG(test_mutate_cpumask, struct task_struct *task, u64 clone_flags)
+ {
+- struct bpf_cpumask *cpumask;
+-
+ /* Can't set the CPU of a 
non-struct bpf_cpumask. */ + bpf_cpumask_set_cpu(0, (struct bpf_cpumask *)task->cpus_ptr); +- __sink(cpumask); + + return 0; + } +diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c +index 7ce7e827d5f01..66a60bfb58672 100644 +--- a/tools/testing/selftests/bpf/progs/dynptr_fail.c ++++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c +@@ -80,7 +80,7 @@ SEC("?raw_tp") + __failure __msg("Unreleased reference id=2") + int ringbuf_missing_release1(void *ctx) + { +- struct bpf_dynptr ptr; ++ struct bpf_dynptr ptr = {}; + + bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr); + +@@ -1385,7 +1385,7 @@ SEC("?raw_tp") + __failure __msg("Expected an initialized dynptr as arg #1") + int dynptr_adjust_invalid(void *ctx) + { +- struct bpf_dynptr ptr; ++ struct bpf_dynptr ptr = {}; + + /* this should fail */ + bpf_dynptr_adjust(&ptr, 1, 2); +@@ -1398,7 +1398,7 @@ SEC("?raw_tp") + __failure __msg("Expected an initialized dynptr as arg #1") + int dynptr_is_null_invalid(void *ctx) + { +- struct bpf_dynptr ptr; ++ struct bpf_dynptr ptr = {}; + + /* this should fail */ + bpf_dynptr_is_null(&ptr); +@@ -1411,7 +1411,7 @@ SEC("?raw_tp") + __failure __msg("Expected an initialized dynptr as arg #1") + int dynptr_is_rdonly_invalid(void *ctx) + { +- struct bpf_dynptr ptr; ++ struct bpf_dynptr ptr = {}; + + /* this should fail */ + bpf_dynptr_is_rdonly(&ptr); +@@ -1424,7 +1424,7 @@ SEC("?raw_tp") + __failure __msg("Expected an initialized dynptr as arg #1") + int dynptr_size_invalid(void *ctx) + { +- struct bpf_dynptr ptr; ++ struct bpf_dynptr ptr = {}; + + /* this should fail */ + bpf_dynptr_size(&ptr); +@@ -1437,7 +1437,7 @@ SEC("?raw_tp") + __failure __msg("Expected an initialized dynptr as arg #1") + int clone_invalid1(void *ctx) + { +- struct bpf_dynptr ptr1; ++ struct bpf_dynptr ptr1 = {}; + struct bpf_dynptr ptr2; + + /* this should fail */ +diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c +index c20c4e38b71c5..5685c2810fe53 100644 +--- a/tools/testing/selftests/bpf/progs/iters.c ++++ b/tools/testing/selftests/bpf/progs/iters.c +@@ -1411,4 +1411,58 @@ __naked int checkpoint_states_deletion(void) + ); + } + ++__u32 upper, select_n, result; ++__u64 global; ++ ++static __noinline bool nest_2(char *str) ++{ ++ /* some insns (including branch insns) to ensure stacksafe() is triggered ++ * in nest_2(). This way, stacksafe() can compare frame associated with nest_1(). 
++ */ ++ if (str[0] == 't') ++ return true; ++ if (str[1] == 'e') ++ return true; ++ if (str[2] == 's') ++ return true; ++ if (str[3] == 't') ++ return true; ++ return false; ++} ++ ++static __noinline bool nest_1(int n) ++{ ++ /* case 0: allocate stack, case 1: no allocate stack */ ++ switch (n) { ++ case 0: { ++ char comm[16]; ++ ++ if (bpf_get_current_comm(comm, 16)) ++ return false; ++ return nest_2(comm); ++ } ++ case 1: ++ return nest_2((char *)&global); ++ default: ++ return false; ++ } ++} ++ ++SEC("raw_tp") ++__success ++int iter_subprog_check_stacksafe(const void *ctx) ++{ ++ long i; ++ ++ bpf_for(i, 0, upper) { ++ if (!nest_1(select_n)) { ++ result = 1; ++ return 0; ++ } ++ } ++ ++ result = 2; ++ return 0; ++} ++ + char _license[] SEC("license") = "GPL"; +diff --git a/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c b/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c +index f46965053acb2..4d619bea9c758 100644 +--- a/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c ++++ b/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c +@@ -4,6 +4,10 @@ + #include + #include "bpf_misc.h" + ++#ifndef __clang__ ++#pragma GCC diagnostic ignored "-Warray-bounds" ++#endif ++ + char _license[] SEC("license") = "GPL"; + + struct { +diff --git a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c +index f66af753bbbb8..e68da33d7631d 100644 +--- a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c ++++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c +@@ -597,12 +597,18 @@ int ip6vxlan_get_tunnel_src(struct __sk_buff *skb) + return TC_ACT_OK; + } + ++struct local_geneve_opt { ++ struct geneve_opt gopt; ++ int data; ++}; ++ + SEC("tc") + int geneve_set_tunnel(struct __sk_buff *skb) + { + int ret; + struct bpf_tunnel_key key; +- struct geneve_opt gopt; ++ struct local_geneve_opt local_gopt; ++ struct geneve_opt *gopt = (struct geneve_opt *) &local_gopt; + + __builtin_memset(&key, 0x0, sizeof(key)); + key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */ +@@ -610,14 +616,14 @@ int geneve_set_tunnel(struct __sk_buff *skb) + key.tunnel_tos = 0; + key.tunnel_ttl = 64; + +- __builtin_memset(&gopt, 0x0, sizeof(gopt)); +- gopt.opt_class = bpf_htons(0x102); /* Open Virtual Networking (OVN) */ +- gopt.type = 0x08; +- gopt.r1 = 0; +- gopt.r2 = 0; +- gopt.r3 = 0; +- gopt.length = 2; /* 4-byte multiple */ +- *(int *) &gopt.opt_data = bpf_htonl(0xdeadbeef); ++ __builtin_memset(gopt, 0x0, sizeof(local_gopt)); ++ gopt->opt_class = bpf_htons(0x102); /* Open Virtual Networking (OVN) */ ++ gopt->type = 0x08; ++ gopt->r1 = 0; ++ gopt->r2 = 0; ++ gopt->r3 = 0; ++ gopt->length = 2; /* 4-byte multiple */ ++ *(int *) &gopt->opt_data = bpf_htonl(0xdeadbeef); + + ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), + BPF_F_ZERO_CSUM_TX); +@@ -626,7 +632,7 @@ int geneve_set_tunnel(struct __sk_buff *skb) + return TC_ACT_SHOT; + } + +- ret = bpf_skb_set_tunnel_opt(skb, &gopt, sizeof(gopt)); ++ ret = bpf_skb_set_tunnel_opt(skb, gopt, sizeof(local_gopt)); + if (ret < 0) { + log_err(ret); + return TC_ACT_SHOT; +@@ -661,7 +667,8 @@ SEC("tc") + int ip6geneve_set_tunnel(struct __sk_buff *skb) + { + struct bpf_tunnel_key key; +- struct geneve_opt gopt; ++ struct local_geneve_opt local_gopt; ++ struct geneve_opt *gopt = (struct geneve_opt *) &local_gopt; + int ret; + + __builtin_memset(&key, 0x0, sizeof(key)); +@@ -677,16 +684,16 @@ int ip6geneve_set_tunnel(struct __sk_buff *skb) + return TC_ACT_SHOT; + } + +- __builtin_memset(&gopt, 0x0, 
sizeof(gopt)); +- gopt.opt_class = bpf_htons(0x102); /* Open Virtual Networking (OVN) */ +- gopt.type = 0x08; +- gopt.r1 = 0; +- gopt.r2 = 0; +- gopt.r3 = 0; +- gopt.length = 2; /* 4-byte multiple */ +- *(int *) &gopt.opt_data = bpf_htonl(0xfeedbeef); ++ __builtin_memset(gopt, 0x0, sizeof(local_gopt)); ++ gopt->opt_class = bpf_htons(0x102); /* Open Virtual Networking (OVN) */ ++ gopt->type = 0x08; ++ gopt->r1 = 0; ++ gopt->r2 = 0; ++ gopt->r3 = 0; ++ gopt->length = 2; /* 4-byte multiple */ ++ *(int *) &gopt->opt_data = bpf_htonl(0xfeedbeef); + +- ret = bpf_skb_set_tunnel_opt(skb, &gopt, sizeof(gopt)); ++ ret = bpf_skb_set_tunnel_opt(skb, gopt, sizeof(gopt)); + if (ret < 0) { + log_err(ret); + return TC_ACT_SHOT; +diff --git a/tools/testing/selftests/core/close_range_test.c b/tools/testing/selftests/core/close_range_test.c +index 749239930ca83..190c57b0efeba 100644 +--- a/tools/testing/selftests/core/close_range_test.c ++++ b/tools/testing/selftests/core/close_range_test.c +@@ -563,4 +563,39 @@ TEST(close_range_cloexec_unshare_syzbot) + EXPECT_EQ(close(fd3), 0); + } + ++TEST(close_range_bitmap_corruption) ++{ ++ pid_t pid; ++ int status; ++ struct __clone_args args = { ++ .flags = CLONE_FILES, ++ .exit_signal = SIGCHLD, ++ }; ++ ++ /* get the first 128 descriptors open */ ++ for (int i = 2; i < 128; i++) ++ EXPECT_GE(dup2(0, i), 0); ++ ++ /* get descriptor table shared */ ++ pid = sys_clone3(&args, sizeof(args)); ++ ASSERT_GE(pid, 0); ++ ++ if (pid == 0) { ++ /* unshare and truncate descriptor table down to 64 */ ++ if (sys_close_range(64, ~0U, CLOSE_RANGE_UNSHARE)) ++ exit(EXIT_FAILURE); ++ ++ ASSERT_EQ(fcntl(64, F_GETFD), -1); ++ /* ... and verify that the range 64..127 is not ++ stuck "fully used" according to secondary bitmap */ ++ EXPECT_EQ(dup(0), 64) ++ exit(EXIT_FAILURE); ++ exit(EXIT_SUCCESS); ++ } ++ ++ EXPECT_EQ(waitpid(pid, &status, 0), pid); ++ EXPECT_EQ(true, WIFEXITED(status)); ++ EXPECT_EQ(0, WEXITSTATUS(status)); ++} ++ + TEST_HARNESS_MAIN +diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile +index 8b2b9bb8bad10..c9fcbc6e5121e 100644 +--- a/tools/testing/selftests/mm/Makefile ++++ b/tools/testing/selftests/mm/Makefile +@@ -51,7 +51,9 @@ TEST_GEN_FILES += madv_populate + TEST_GEN_FILES += map_fixed_noreplace + TEST_GEN_FILES += map_hugetlb + TEST_GEN_FILES += map_populate ++ifneq (,$(filter $(ARCH),arm64 riscv riscv64 x86 x86_64)) + TEST_GEN_FILES += memfd_secret ++endif + TEST_GEN_FILES += migration + TEST_GEN_FILES += mkdirty + TEST_GEN_FILES += mlock-random-test +diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh +index 3e2bc818d566f..d7b2c9d07eec5 100755 +--- a/tools/testing/selftests/mm/run_vmtests.sh ++++ b/tools/testing/selftests/mm/run_vmtests.sh +@@ -5,6 +5,7 @@ + # Kselftest framework requirement - SKIP code is 4. + ksft_skip=4 + ++count_total=0 + count_pass=0 + count_fail=0 + count_skip=0 +@@ -17,6 +18,7 @@ usage: ${BASH_SOURCE[0]:-$0} [ options ] + -a: run all tests, including extra ones + -t: specify specific categories to tests to run + -h: display this message ++ -n: disable TAP output + + The default behavior is to run required tests only. If -a is specified, + will run all tests. 
+@@ -75,12 +77,14 @@ EOF + } + + RUN_ALL=false ++TAP_PREFIX="# " + +-while getopts "aht:" OPT; do ++while getopts "aht:n" OPT; do + case ${OPT} in + "a") RUN_ALL=true ;; + "h") usage ;; + "t") VM_SELFTEST_ITEMS=${OPTARG} ;; ++ "n") TAP_PREFIX= ;; + esac + done + shift $((OPTIND -1)) +@@ -182,30 +186,52 @@ fi + VADDR64=0 + echo "$ARCH64STR" | grep "$ARCH" &>/dev/null && VADDR64=1 + ++tap_prefix() { ++ sed -e "s/^/${TAP_PREFIX}/" ++} ++ ++tap_output() { ++ if [[ ! -z "$TAP_PREFIX" ]]; then ++ read str ++ echo $str ++ fi ++} ++ ++pretty_name() { ++ echo "$*" | sed -e 's/^\(bash \)\?\.\///' ++} ++ + # Usage: run_test [test binary] [arbitrary test arguments...] + run_test() { + if test_selected ${CATEGORY}; then ++ local test=$(pretty_name "$*") + local title="running $*" + local sep=$(echo -n "$title" | tr "[:graph:][:space:]" -) +- printf "%s\n%s\n%s\n" "$sep" "$title" "$sep" ++ printf "%s\n%s\n%s\n" "$sep" "$title" "$sep" | tap_prefix + +- "$@" +- local ret=$? ++ ("$@" 2>&1) | tap_prefix ++ local ret=${PIPESTATUS[0]} ++ count_total=$(( count_total + 1 )) + if [ $ret -eq 0 ]; then + count_pass=$(( count_pass + 1 )) +- echo "[PASS]" ++ echo "[PASS]" | tap_prefix ++ echo "ok ${count_total} ${test}" | tap_output + elif [ $ret -eq $ksft_skip ]; then + count_skip=$(( count_skip + 1 )) +- echo "[SKIP]" ++ echo "[SKIP]" | tap_prefix ++ echo "ok ${count_total} ${test} # SKIP" | tap_output + exitcode=$ksft_skip + else + count_fail=$(( count_fail + 1 )) +- echo "[FAIL]" ++ echo "[FAIL]" | tap_prefix ++ echo "not ok ${count_total} ${test} # exit=$ret" | tap_output + exitcode=1 + fi + fi # test_selected + } + ++echo "TAP version 13" | tap_output ++ + CATEGORY="hugetlb" run_test ./hugepage-mmap + + shmmax=$(cat /proc/sys/kernel/shmmax) +@@ -222,9 +248,9 @@ CATEGORY="hugetlb" run_test ./hugepage-vmemmap + CATEGORY="hugetlb" run_test ./hugetlb-madvise + + if test_selected "hugetlb"; then +- echo "NOTE: These hugetlb tests provide minimal coverage. Use" +- echo " https://github.com/libhugetlbfs/libhugetlbfs.git for" +- echo " hugetlb regression testing." ++ echo "NOTE: These hugetlb tests provide minimal coverage. Use" | tap_prefix ++ echo " https://github.com/libhugetlbfs/libhugetlbfs.git for" | tap_prefix ++ echo " hugetlb regression testing." 
| tap_prefix + fi + + CATEGORY="mmap" run_test ./map_fixed_noreplace +@@ -303,7 +329,11 @@ CATEGORY="hmm" run_test bash ./test_hmm.sh smoke + # MADV_POPULATE_READ and MADV_POPULATE_WRITE tests + CATEGORY="madv_populate" run_test ./madv_populate + ++if [ -x ./memfd_secret ] ++then ++(echo 0 | sudo tee /proc/sys/kernel/yama/ptrace_scope 2>&1) | tap_prefix + CATEGORY="memfd_secret" run_test ./memfd_secret ++fi + + # KSM KSM_MERGE_TIME_HUGE_PAGES test with size of 100 + CATEGORY="ksm" run_test ./ksm_tests -H -s 100 +@@ -357,6 +387,7 @@ CATEGORY="mkdirty" run_test ./mkdirty + + CATEGORY="mdwe" run_test ./mdwe_test + +-echo "SUMMARY: PASS=${count_pass} SKIP=${count_skip} FAIL=${count_fail}" ++echo "SUMMARY: PASS=${count_pass} SKIP=${count_skip} FAIL=${count_fail}" | tap_prefix ++echo "1..${count_total}" | tap_output + + exit $exitcode +diff --git a/tools/testing/selftests/net/lib.sh b/tools/testing/selftests/net/lib.sh +index a186490edb4ab..e2c35eda230af 100644 +--- a/tools/testing/selftests/net/lib.sh ++++ b/tools/testing/selftests/net/lib.sh +@@ -38,25 +38,18 @@ busywait() + cleanup_ns() + { + local ns="" +- local errexit=0 + local ret=0 + +- # disable errexit temporary +- if [[ $- =~ "e" ]]; then +- errexit=1 +- set +e +- fi +- + for ns in "$@"; do + [ -z "${ns}" ] && continue +- ip netns delete "${ns}" &> /dev/null ++ ip netns pids "${ns}" 2> /dev/null | xargs -r kill || true ++ ip netns delete "${ns}" &> /dev/null || true + if ! busywait $BUSYWAIT_TIMEOUT ip netns list \| grep -vq "^$ns$" &> /dev/null; then + echo "Warn: Failed to remove namespace $ns" + ret=1 + fi + done + +- [ $errexit -eq 1 ] && set -e + return $ret + } + +diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh +index b16b8278c4cea..e8726ec658ae4 100755 +--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh ++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh +@@ -481,9 +481,10 @@ reset_with_tcp_filter() + local ns="${!1}" + local src="${2}" + local target="${3}" ++ local chain="${4:-INPUT}" + + if ! 
ip netns exec "${ns}" ${iptables} \ +- -A INPUT \ ++ -A "${chain}" \ + -s "${src}" \ + -p tcp \ + -j "${target}"; then +@@ -3220,6 +3221,7 @@ fullmesh_tests() + pm_nl_set_limits $ns1 1 3 + pm_nl_set_limits $ns2 1 3 + pm_nl_add_endpoint $ns1 10.0.2.1 flags signal ++ pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,fullmesh + fullmesh=1 speed=slow \ + run_tests $ns1 $ns2 10.0.1.1 + chk_join_nr 3 3 3 +@@ -3574,10 +3576,10 @@ endpoint_tests() + mptcp_lib_kill_wait $tests_pid + fi + +- if reset "delete and re-add" && ++ if reset_with_tcp_filter "delete and re-add" ns2 10.0.3.2 REJECT OUTPUT && + mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then +- pm_nl_set_limits $ns1 1 1 +- pm_nl_set_limits $ns2 1 1 ++ pm_nl_set_limits $ns1 0 2 ++ pm_nl_set_limits $ns2 0 2 + pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow + test_linkfail=4 speed=20 \ + run_tests $ns1 $ns2 10.0.1.1 & +@@ -3594,11 +3596,27 @@ endpoint_tests() + chk_subflow_nr "after delete" 1 + chk_mptcp_info subflows 0 subflows 0 + +- pm_nl_add_endpoint $ns2 10.0.2.2 dev ns2eth2 flags subflow ++ pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow + wait_mpj $ns2 + chk_subflow_nr "after re-add" 2 + chk_mptcp_info subflows 1 subflows 1 ++ ++ pm_nl_add_endpoint $ns2 10.0.3.2 id 3 flags subflow ++ wait_attempt_fail $ns2 ++ chk_subflow_nr "after new reject" 2 ++ chk_mptcp_info subflows 1 subflows 1 ++ ++ ip netns exec "${ns2}" ${iptables} -D OUTPUT -s "10.0.3.2" -p tcp -j REJECT ++ pm_nl_del_endpoint $ns2 3 10.0.3.2 ++ pm_nl_add_endpoint $ns2 10.0.3.2 id 3 flags subflow ++ wait_mpj $ns2 ++ chk_subflow_nr "after no reject" 3 ++ chk_mptcp_info subflows 2 subflows 2 ++ + mptcp_lib_kill_wait $tests_pid ++ ++ chk_join_nr 3 3 3 ++ chk_rm_nr 1 1 + fi + } + +diff --git a/tools/testing/selftests/net/udpgro.sh b/tools/testing/selftests/net/udpgro.sh +index 8802604148dda..53341c8135e88 100755 +--- a/tools/testing/selftests/net/udpgro.sh ++++ b/tools/testing/selftests/net/udpgro.sh +@@ -46,17 +46,19 @@ run_one() { + local -r all="$@" + local -r tx_args=${all%rx*} + local -r rx_args=${all#*rx} ++ local ret=0 + + cfg_veth + +- ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} && \ +- echo "ok" || \ +- echo "failed" & ++ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} & ++ local PID1=$! + + wait_local_port_listen ${PEER_NS} 8000 udp + ./udpgso_bench_tx ${tx_args} +- ret=$? +- wait $(jobs -p) ++ check_err $? ++ wait ${PID1} ++ check_err $? ++ [ "$ret" -eq 0 ] && echo "ok" || echo "failed" + return $ret + } + +@@ -73,6 +75,7 @@ run_one_nat() { + local -r all="$@" + local -r tx_args=${all%rx*} + local -r rx_args=${all#*rx} ++ local ret=0 + + if [[ ${tx_args} = *-4* ]]; then + ipt_cmd=iptables +@@ -93,16 +96,17 @@ run_one_nat() { + # ... so that GRO will match the UDP_GRO enabled socket, but packets + # will land on the 'plain' one + ip netns exec "${PEER_NS}" ./udpgso_bench_rx -G ${family} -b ${addr1} -n 0 & +- pid=$! +- ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${family} -b ${addr2%/*} ${rx_args} && \ +- echo "ok" || \ +- echo "failed"& ++ local PID1=$! ++ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${family} -b ${addr2%/*} ${rx_args} & ++ local PID2=$! + + wait_local_port_listen "${PEER_NS}" 8000 udp + ./udpgso_bench_tx ${tx_args} +- ret=$? +- kill -INT $pid +- wait $(jobs -p) ++ check_err $? ++ kill -INT ${PID1} ++ wait ${PID2} ++ check_err $? 
++ [ "$ret" -eq 0 ] && echo "ok" || echo "failed" + return $ret + } + +@@ -111,20 +115,26 @@ run_one_2sock() { + local -r all="$@" + local -r tx_args=${all%rx*} + local -r rx_args=${all#*rx} ++ local ret=0 + + cfg_veth + + ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} -p 12345 & +- ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 2000 -R 10 ${rx_args} && \ +- echo "ok" || \ +- echo "failed" & ++ local PID1=$! ++ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 2000 -R 10 ${rx_args} & ++ local PID2=$! + + wait_local_port_listen "${PEER_NS}" 12345 udp + ./udpgso_bench_tx ${tx_args} -p 12345 ++ check_err $? + wait_local_port_listen "${PEER_NS}" 8000 udp + ./udpgso_bench_tx ${tx_args} +- ret=$? +- wait $(jobs -p) ++ check_err $? ++ wait ${PID1} ++ check_err $? ++ wait ${PID2} ++ check_err $? ++ [ "$ret" -eq 0 ] && echo "ok" || echo "failed" + return $ret + } + +diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py +index b98256f38447d..8f969f54ddf40 100755 +--- a/tools/testing/selftests/tc-testing/tdc.py ++++ b/tools/testing/selftests/tc-testing/tdc.py +@@ -129,7 +129,6 @@ class PluginMgr: + except Exception as ee: + print('exception {} in call to pre_case for {} plugin'. + format(ee, pgn_inst.__class__)) +- print('test_ordinal is {}'.format(test_ordinal)) + print('testid is {}'.format(caseinfo['id'])) + raise + +diff --git a/tools/tracing/rtla/src/osnoise_top.c b/tools/tracing/rtla/src/osnoise_top.c +index 457360db07673..d7295c0115a28 100644 +--- a/tools/tracing/rtla/src/osnoise_top.c ++++ b/tools/tracing/rtla/src/osnoise_top.c +@@ -624,8 +624,10 @@ struct osnoise_tool *osnoise_init_top(struct osnoise_top_params *params) + return NULL; + + tool->data = osnoise_alloc_top(nr_cpus); +- if (!tool->data) +- goto out_err; ++ if (!tool->data) { ++ osnoise_destroy_tool(tool); ++ return NULL; ++ } + + tool->params = params; + +@@ -633,11 +635,6 @@ struct osnoise_tool *osnoise_init_top(struct osnoise_top_params *params) + osnoise_top_handler, NULL); + + return tool; +- +-out_err: +- osnoise_free_top(tool->data); +- osnoise_destroy_tool(tool); +- return NULL; + } + + static int stop_tracing; diff --git a/patch/kernel/archive/odroidxu4-6.6/patch-6.6.48-49.patch b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.48-49.patch new file mode 100644 index 000000000000..e30e76bbb0e2 --- /dev/null +++ b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.48-49.patch @@ -0,0 +1,3833 @@ +diff --git a/Makefile b/Makefile +index cef1e15ad7606..008fa9137c732 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 6 +-SUBLEVEL = 48 ++SUBLEVEL = 49 + EXTRAVERSION = + NAME = Hurr durr I'ma ninja sloth + +diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c +index 7fcf3e9b71030..c81183935e970 100644 +--- a/arch/alpha/kernel/pci_iommu.c ++++ b/arch/alpha/kernel/pci_iommu.c +@@ -929,7 +929,7 @@ const struct dma_map_ops alpha_pci_ops = { + .dma_supported = alpha_pci_supported, + .mmap = dma_common_mmap, + .get_sgtable = dma_common_get_sgtable, +- .alloc_pages_op = dma_common_alloc_pages, ++ .alloc_pages = dma_common_alloc_pages, + .free_pages = dma_common_free_pages, + }; + EXPORT_SYMBOL(alpha_pci_ops); +diff --git a/arch/arm/boot/dts/nxp/imx/imx6dl-yapp43-common.dtsi b/arch/arm/boot/dts/nxp/imx/imx6dl-yapp43-common.dtsi +index 52a0f6ee426f9..bcf4d9c870ec9 100644 +--- a/arch/arm/boot/dts/nxp/imx/imx6dl-yapp43-common.dtsi ++++ b/arch/arm/boot/dts/nxp/imx/imx6dl-yapp43-common.dtsi +@@ 
-274,24 +274,24 @@ leds: led-controller@30 {
+
+ led@0 {
+ chan-name = "R";
+- led-cur = /bits/ 8 <0x20>;
+- max-cur = /bits/ 8 <0x60>;
++ led-cur = /bits/ 8 <0x6e>;
++ max-cur = /bits/ 8 <0xc8>;
+ reg = <0>;
+ color = <LED_COLOR_ID_RED>;
+ };
+
+ led@1 {
+ chan-name = "G";
+- led-cur = /bits/ 8 <0x20>;
+- max-cur = /bits/ 8 <0x60>;
++ led-cur = /bits/ 8 <0xbe>;
++ max-cur = /bits/ 8 <0xc8>;
+ reg = <1>;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@2 {
+ chan-name = "B";
+- led-cur = /bits/ 8 <0x20>;
+- max-cur = /bits/ 8 <0x60>;
++ led-cur = /bits/ 8 <0xbe>;
++ max-cur = /bits/ 8 <0xc8>;
+ reg = <2>;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+diff --git a/arch/arm/boot/dts/ti/omap/omap3-n900.dts b/arch/arm/boot/dts/ti/omap/omap3-n900.dts
+index d334853412517..036e472b77beb 100644
+--- a/arch/arm/boot/dts/ti/omap/omap3-n900.dts
++++ b/arch/arm/boot/dts/ti/omap/omap3-n900.dts
+@@ -781,7 +781,7 @@ accelerometer@1d {
+
+ mount-matrix = "-1", "0", "0",
+ "0", "1", "0",
+- "0", "0", "1";
++ "0", "0", "-1";
+ };
+
+ cam1: camera@3e {
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts b/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
+index acd265d8b58ed..e094f409028dd 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
+@@ -163,13 +163,12 @@ sound-wm8962 {
+
+ simple-audio-card,cpu {
+ sound-dai = <&sai3>;
++ frame-master;
++ bitclock-master;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&wm8962>;
+- clocks = <&clk IMX8MP_CLK_IPP_DO_CLKO1>;
+- frame-master;
+- bitclock-master;
+ };
+ };
+ };
+@@ -381,10 +380,9 @@ &pcie_phy {
+ &sai3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai3>;
+- assigned-clocks = <&clk IMX8MP_CLK_SAI3>,
+- <&clk IMX8MP_AUDIO_PLL2> ;
+- assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL2_OUT>;
+- assigned-clock-rates = <12288000>, <361267200>;
++ assigned-clocks = <&clk IMX8MP_CLK_SAI3>;
++ assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL1_OUT>;
++ assigned-clock-rates = <12288000>;
+ fsl,sai-mclk-direction-output;
+ status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts
+index 3c5c67ebee5d3..aaf9685ef0fbb 100644
+--- a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts
++++ b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts
+@@ -437,7 +437,7 @@ &usdhc2 {
+ pinctrl-0 = <&pinctrl_usdhc2_hs>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-1 = <&pinctrl_usdhc2_uhs>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-2 = <&pinctrl_usdhc2_uhs>, <&pinctrl_usdhc2_gpio>;
+- cd-gpios = <&gpio3 00 GPIO_ACTIVE_LOW>;
++ cd-gpios = <&gpio3 0 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&reg_usdhc2_vmmc>;
+ bus-width = <4>;
+ no-sdio;
+diff --git a/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi b/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi
+index f6e422dc2663e..b6f3c076fe54a 100644
+--- a/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi
+@@ -19,7 +19,7 @@ reserved-memory {
+ linux,cma {
+ compatible = "shared-dma-pool";
+ reusable;
+- alloc-ranges = <0 0x60000000 0 0x40000000>;
++ alloc-ranges = <0 0x80000000 0 0x40000000>;
+ size = <0 0x10000000>;
+ linux,cma-default;
+ };
+diff --git a/arch/arm64/boot/dts/freescale/imx93.dtsi b/arch/arm64/boot/dts/freescale/imx93.dtsi
+index 943b7e6655634..35155b009dd24 100644
+--- a/arch/arm64/boot/dts/freescale/imx93.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx93.dtsi
+@@ -786,6 +786,8 @@ fec: ethernet@42890000 {
+ fsl,num-tx-queues = <3>;
+ fsl,num-rx-queues = <3>;
+ 
fsl,stop-mode = <&wakeupmix_gpr 0x0c 1>;
++ nvmem-cells = <&eth_mac1>;
++ nvmem-cell-names = "mac-address";
+ status = "disabled";
+ };
+
+@@ -807,7 +809,9 @@ eqos: ethernet@428a0000 {
+ <&clk IMX93_CLK_SYS_PLL_PFD0_DIV2>;
+ assigned-clock-rates = <100000000>, <250000000>;
+ intf_mode = <&wakeupmix_gpr 0x28>;
+- snps,clk-csr = <0>;
++ snps,clk-csr = <6>;
++ nvmem-cells = <&eth_mac2>;
++ nvmem-cell-names = "mac-address";
+ status = "disabled";
+ };
+
+@@ -888,6 +892,15 @@ ocotp: efuse@47510000 {
+ reg = <0x47510000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
++
++ eth_mac1: mac-address@4ec {
++ reg = <0x4ec 0x6>;
++ };
++
++ eth_mac2: mac-address@4f2 {
++ reg = <0x4f2 0x6>;
++ };
++
+ };
+
+ s4muap: mailbox@47520000 {
+diff --git a/arch/loongarch/include/asm/dma-direct.h b/arch/loongarch/include/asm/dma-direct.h
+deleted file mode 100644
+index 75ccd808a2af3..0000000000000
+--- a/arch/loongarch/include/asm/dma-direct.h
++++ /dev/null
+@@ -1,11 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+- */
+-#ifndef _LOONGARCH_DMA_DIRECT_H
+-#define _LOONGARCH_DMA_DIRECT_H
+-
+-dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
+-phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
+-
+-#endif /* _LOONGARCH_DMA_DIRECT_H */
+diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
+index c97b089b99029..eabddb89d221f 100644
+--- a/arch/mips/jazz/jazzdma.c
++++ b/arch/mips/jazz/jazzdma.c
+@@ -617,7 +617,7 @@ const struct dma_map_ops jazz_dma_ops = {
+ .sync_sg_for_device = jazz_dma_sync_sg_for_device,
+ .mmap = dma_common_mmap,
+ .get_sgtable = dma_common_get_sgtable,
+- .alloc_pages_op = dma_common_alloc_pages,
++ .alloc_pages = dma_common_alloc_pages,
+ .free_pages = dma_common_free_pages,
+ };
+ EXPORT_SYMBOL(jazz_dma_ops);
+diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
+index f0ae39e77e374..8920862ffd791 100644
+--- a/arch/powerpc/kernel/dma-iommu.c
++++ b/arch/powerpc/kernel/dma-iommu.c
+@@ -216,6 +216,6 @@ const struct dma_map_ops dma_iommu_ops = {
+ .get_required_mask = dma_iommu_get_required_mask,
+ .mmap = dma_common_mmap,
+ .get_sgtable = dma_common_get_sgtable,
+- .alloc_pages_op = dma_common_alloc_pages,
++ .alloc_pages = dma_common_alloc_pages,
+ .free_pages = dma_common_free_pages,
+ };
+diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
+index 56dc6b29a3e76..d6b5f5ecd5152 100644
+--- a/arch/powerpc/platforms/ps3/system-bus.c
++++ b/arch/powerpc/platforms/ps3/system-bus.c
+@@ -695,7 +695,7 @@ static const struct dma_map_ops ps3_sb_dma_ops = {
+ .unmap_page = ps3_unmap_page,
+ .mmap = dma_common_mmap,
+ .get_sgtable = dma_common_get_sgtable,
+- .alloc_pages_op = dma_common_alloc_pages,
++ .alloc_pages = dma_common_alloc_pages,
+ .free_pages = dma_common_free_pages,
+ };
+
+@@ -709,7 +709,7 @@ static const struct dma_map_ops ps3_ioc0_dma_ops = {
+ .unmap_page = ps3_unmap_page,
+ .mmap = dma_common_mmap,
+ .get_sgtable = dma_common_get_sgtable,
+- .alloc_pages_op = dma_common_alloc_pages,
++ .alloc_pages = dma_common_alloc_pages,
+ .free_pages = dma_common_free_pages,
+ };
+
+diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
+index 0c90fc4c37963..2dc9cbc4bcd8f 100644
+--- a/arch/powerpc/platforms/pseries/vio.c
++++ b/arch/powerpc/platforms/pseries/vio.c
+@@ -611,7 +611,7 @@ static const struct dma_map_ops vio_dma_mapping_ops = {
+ .get_required_mask = 
dma_iommu_get_required_mask, + .mmap = dma_common_mmap, + .get_sgtable = dma_common_get_sgtable, +- .alloc_pages_op = dma_common_alloc_pages, ++ .alloc_pages = dma_common_alloc_pages, + .free_pages = dma_common_free_pages, + }; + +diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c +index 842a0ec5eaa9e..56a917df410d3 100644 +--- a/arch/x86/kernel/amd_gart_64.c ++++ b/arch/x86/kernel/amd_gart_64.c +@@ -676,7 +676,7 @@ static const struct dma_map_ops gart_dma_ops = { + .get_sgtable = dma_common_get_sgtable, + .dma_supported = dma_direct_supported, + .get_required_mask = dma_direct_get_required_mask, +- .alloc_pages_op = dma_direct_alloc_pages, ++ .alloc_pages = dma_direct_alloc_pages, + .free_pages = dma_direct_free_pages, + }; + +diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c +index b5d40e0e05f31..814dd966b1a45 100644 +--- a/drivers/bluetooth/btnxpuart.c ++++ b/drivers/bluetooth/btnxpuart.c +@@ -29,6 +29,7 @@ + #define BTNXPUART_CHECK_BOOT_SIGNATURE 3 + #define BTNXPUART_SERDEV_OPEN 4 + #define BTNXPUART_IR_IN_PROGRESS 5 ++#define BTNXPUART_FW_DOWNLOAD_ABORT 6 + + /* NXP HW err codes */ + #define BTNXPUART_IR_HW_ERR 0xb0 +@@ -126,6 +127,7 @@ struct ps_data { + struct hci_dev *hdev; + struct work_struct work; + struct timer_list ps_timer; ++ struct mutex ps_lock; + }; + + struct wakeup_cmd_payload { +@@ -158,6 +160,7 @@ struct btnxpuart_dev { + u8 fw_name[MAX_FW_FILE_NAME_LEN]; + u32 fw_dnld_v1_offset; + u32 fw_v1_sent_bytes; ++ u32 fw_dnld_v3_offset; + u32 fw_v3_offset_correction; + u32 fw_v1_expected_len; + u32 boot_reg_offset; +@@ -333,6 +336,9 @@ static void ps_start_timer(struct btnxpuart_dev *nxpdev) + + if (psdata->cur_psmode == PS_MODE_ENABLE) + mod_timer(&psdata->ps_timer, jiffies + msecs_to_jiffies(psdata->h2c_ps_interval)); ++ ++ if (psdata->ps_state == PS_STATE_AWAKE && psdata->ps_cmd == PS_CMD_ENTER_PS) ++ cancel_work_sync(&psdata->work); + } + + static void ps_cancel_timer(struct btnxpuart_dev *nxpdev) +@@ -353,6 +359,7 @@ static void ps_control(struct hci_dev *hdev, u8 ps_state) + !test_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state)) + return; + ++ mutex_lock(&psdata->ps_lock); + switch (psdata->cur_h2c_wakeupmode) { + case WAKEUP_METHOD_DTR: + if (ps_state == PS_STATE_AWAKE) +@@ -366,12 +373,15 @@ static void ps_control(struct hci_dev *hdev, u8 ps_state) + status = serdev_device_break_ctl(nxpdev->serdev, 0); + else + status = serdev_device_break_ctl(nxpdev->serdev, -1); ++ msleep(20); /* Allow chip to detect UART-break and enter sleep */ + bt_dev_dbg(hdev, "Set UART break: %s, status=%d", + str_on_off(ps_state == PS_STATE_SLEEP), status); + break; + } + if (!status) + psdata->ps_state = ps_state; ++ mutex_unlock(&psdata->ps_lock); ++ + if (ps_state == PS_STATE_AWAKE) + btnxpuart_tx_wakeup(nxpdev); + } +@@ -407,17 +417,42 @@ static void ps_setup(struct hci_dev *hdev) + + psdata->hdev = hdev; + INIT_WORK(&psdata->work, ps_work_func); ++ mutex_init(&psdata->ps_lock); + timer_setup(&psdata->ps_timer, ps_timeout_func, 0); + } + +-static void ps_wakeup(struct btnxpuart_dev *nxpdev) ++static bool ps_wakeup(struct btnxpuart_dev *nxpdev) + { + struct ps_data *psdata = &nxpdev->psdata; ++ u8 ps_state; + +- if (psdata->ps_state != PS_STATE_AWAKE) { ++ mutex_lock(&psdata->ps_lock); ++ ps_state = psdata->ps_state; ++ mutex_unlock(&psdata->ps_lock); ++ ++ if (ps_state != PS_STATE_AWAKE) { + psdata->ps_cmd = PS_CMD_EXIT_PS; + schedule_work(&psdata->work); ++ return true; + } ++ return false; ++} ++ ++static void ps_cleanup(struct 
btnxpuart_dev *nxpdev) ++{ ++ struct ps_data *psdata = &nxpdev->psdata; ++ u8 ps_state; ++ ++ mutex_lock(&psdata->ps_lock); ++ ps_state = psdata->ps_state; ++ mutex_unlock(&psdata->ps_lock); ++ ++ if (ps_state != PS_STATE_AWAKE) ++ ps_control(psdata->hdev, PS_STATE_AWAKE); ++ ++ ps_cancel_timer(nxpdev); ++ cancel_work_sync(&psdata->work); ++ mutex_destroy(&psdata->ps_lock); + } + + static int send_ps_cmd(struct hci_dev *hdev, void *data) +@@ -550,6 +585,7 @@ static int nxp_download_firmware(struct hci_dev *hdev) + nxpdev->fw_v1_sent_bytes = 0; + nxpdev->fw_v1_expected_len = HDR_LEN; + nxpdev->boot_reg_offset = 0; ++ nxpdev->fw_dnld_v3_offset = 0; + nxpdev->fw_v3_offset_correction = 0; + nxpdev->baudrate_changed = false; + nxpdev->timeout_changed = false; +@@ -564,14 +600,23 @@ static int nxp_download_firmware(struct hci_dev *hdev) + !test_bit(BTNXPUART_FW_DOWNLOADING, + &nxpdev->tx_state), + msecs_to_jiffies(60000)); ++ ++ release_firmware(nxpdev->fw); ++ memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name)); ++ + if (err == 0) { +- bt_dev_err(hdev, "FW Download Timeout."); ++ bt_dev_err(hdev, "FW Download Timeout. offset: %d", ++ nxpdev->fw_dnld_v1_offset ? ++ nxpdev->fw_dnld_v1_offset : ++ nxpdev->fw_dnld_v3_offset); + return -ETIMEDOUT; + } ++ if (test_bit(BTNXPUART_FW_DOWNLOAD_ABORT, &nxpdev->tx_state)) { ++ bt_dev_err(hdev, "FW Download Aborted"); ++ return -EINTR; ++ } + + serdev_device_set_flow_control(nxpdev->serdev, true); +- release_firmware(nxpdev->fw); +- memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name)); + + /* Allow the downloaded FW to initialize */ + msleep(1200); +@@ -982,8 +1027,9 @@ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb) + goto free_skb; + } + +- serdev_device_write_buf(nxpdev->serdev, nxpdev->fw->data + offset - +- nxpdev->fw_v3_offset_correction, len); ++ nxpdev->fw_dnld_v3_offset = offset - nxpdev->fw_v3_offset_correction; ++ serdev_device_write_buf(nxpdev->serdev, nxpdev->fw->data + ++ nxpdev->fw_dnld_v3_offset, len); + + free_skb: + kfree_skb(skb); +@@ -1215,7 +1261,6 @@ static struct sk_buff *nxp_dequeue(void *data) + { + struct btnxpuart_dev *nxpdev = (struct btnxpuart_dev *)data; + +- ps_wakeup(nxpdev); + ps_start_timer(nxpdev); + return skb_dequeue(&nxpdev->txq); + } +@@ -1230,6 +1275,9 @@ static void btnxpuart_tx_work(struct work_struct *work) + struct sk_buff *skb; + int len; + ++ if (ps_wakeup(nxpdev)) ++ return; ++ + while ((skb = nxp_dequeue(nxpdev))) { + len = serdev_device_write_buf(serdev, skb->data, skb->len); + hdev->stat.byte_tx += len; +@@ -1276,7 +1324,6 @@ static int btnxpuart_close(struct hci_dev *hdev) + { + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + +- ps_wakeup(nxpdev); + serdev_device_close(nxpdev->serdev); + skb_queue_purge(&nxpdev->txq); + kfree_skb(nxpdev->rx_skb); +@@ -1412,16 +1459,22 @@ static void nxp_serdev_remove(struct serdev_device *serdev) + struct btnxpuart_dev *nxpdev = serdev_device_get_drvdata(serdev); + struct hci_dev *hdev = nxpdev->hdev; + +- /* Restore FW baudrate to fw_init_baudrate if changed. +- * This will ensure FW baudrate is in sync with +- * driver baudrate in case this driver is re-inserted. 
+- */
+- if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) {
+- nxpdev->new_baudrate = nxpdev->fw_init_baudrate;
+- nxp_set_baudrate_cmd(hdev, NULL);
++ if (is_fw_downloading(nxpdev)) {
++ set_bit(BTNXPUART_FW_DOWNLOAD_ABORT, &nxpdev->tx_state);
++ clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
++ wake_up_interruptible(&nxpdev->check_boot_sign_wait_q);
++ wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q);
++ } else {
++ /* Restore FW baudrate to fw_init_baudrate if changed.
++ * This will ensure FW baudrate is in sync with
++ * driver baudrate in case this driver is re-inserted.
++ */
++ if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) {
++ nxpdev->new_baudrate = nxpdev->fw_init_baudrate;
++ nxp_set_baudrate_cmd(hdev, NULL);
++ }
+ }
+-
+- ps_cancel_timer(nxpdev);
++ ps_cleanup(nxpdev);
+ hci_unregister_dev(hdev);
+ hci_free_dev(hdev);
+ }
+diff --git a/drivers/dma/dw-edma/dw-hdma-v0-core.c b/drivers/dma/dw-edma/dw-hdma-v0-core.c
+index 10e8f0715114f..e3f8db4fe909a 100644
+--- a/drivers/dma/dw-edma/dw-hdma-v0-core.c
++++ b/drivers/dma/dw-edma/dw-hdma-v0-core.c
+@@ -17,8 +17,8 @@ enum dw_hdma_control {
+ DW_HDMA_V0_CB = BIT(0),
+ DW_HDMA_V0_TCB = BIT(1),
+ DW_HDMA_V0_LLP = BIT(2),
+- DW_HDMA_V0_LIE = BIT(3),
+- DW_HDMA_V0_RIE = BIT(4),
++ DW_HDMA_V0_LWIE = BIT(3),
++ DW_HDMA_V0_RWIE = BIT(4),
+ DW_HDMA_V0_CCS = BIT(8),
+ DW_HDMA_V0_LLE = BIT(9),
+ };
+@@ -195,25 +195,14 @@ static void dw_hdma_v0_write_ll_link(struct dw_edma_chunk *chunk,
+ static void dw_hdma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
+ {
+ struct dw_edma_burst *child;
+- struct dw_edma_chan *chan = chunk->chan;
+ u32 control = 0, i = 0;
+- int j;
+
+ if (chunk->cb)
+ control = DW_HDMA_V0_CB;
+
+- j = chunk->bursts_alloc;
+- list_for_each_entry(child, &chunk->burst->list, list) {
+- j--;
+- if (!j) {
+- control |= DW_HDMA_V0_LIE;
+- if (!(chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+- control |= DW_HDMA_V0_RIE;
+- }
+-
++ list_for_each_entry(child, &chunk->burst->list, list)
+ dw_hdma_v0_write_ll_data(chunk, i++, control, child->sz,
+ child->sar, child->dar);
+- }
+
+ control = DW_HDMA_V0_LLP | DW_HDMA_V0_TCB;
+ if (!chunk->cb)
+@@ -247,10 +236,11 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
+ if (first) {
+ /* Enable engine */
+ SET_CH_32(dw, chan->dir, chan->id, ch_en, BIT(0));
+- /* Interrupt enable&unmask - done, abort */
+- tmp = GET_CH_32(dw, chan->dir, chan->id, int_setup) |
+- HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK |
+- HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_ABORT_INT_EN;
++ /* Interrupt unmask - stop, abort */
++ tmp = GET_CH_32(dw, chan->dir, chan->id, int_setup);
++ tmp &= ~(HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK);
++ /* Interrupt enable - stop, abort */
++ tmp |= HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_ABORT_INT_EN;
+ if (!(dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+ tmp |= HDMA_V0_REMOTE_STOP_INT_EN | HDMA_V0_REMOTE_ABORT_INT_EN;
+ SET_CH_32(dw, chan->dir, chan->id, int_setup, tmp);
+diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
+index 5f7d690e3dbae..b341a6f1b0438 100644
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -16,6 +16,7 @@
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
++#include <linux/log2.h>
+ #include <linux/mm.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
+@@ -621,12 +622,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ struct dw_desc *prev;
+ struct dw_desc *first;
+ u32 ctllo, ctlhi;
+- u8 m_master = dwc->dws.m_master;
+- u8 lms = DWC_LLP_LMS(m_master);
++ u8 lms = DWC_LLP_LMS(dwc->dws.m_master);
+ dma_addr_t reg;
+ 
unsigned int reg_width; + unsigned int mem_width; +- unsigned int data_width = dw->pdata->data_width[m_master]; + unsigned int i; + struct scatterlist *sg; + size_t total_len = 0; +@@ -660,7 +659,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, + mem = sg_dma_address(sg); + len = sg_dma_len(sg); + +- mem_width = __ffs(data_width | mem | len); ++ mem_width = __ffs(sconfig->src_addr_width | mem | len); + + slave_sg_todev_fill_desc: + desc = dwc_desc_get(dwc); +@@ -720,7 +719,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, + lli_write(desc, sar, reg); + lli_write(desc, dar, mem); + lli_write(desc, ctlhi, ctlhi); +- mem_width = __ffs(data_width | mem); ++ mem_width = __ffs(sconfig->dst_addr_width | mem); + lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width)); + desc->len = dlen; + +@@ -780,17 +779,93 @@ bool dw_dma_filter(struct dma_chan *chan, void *param) + } + EXPORT_SYMBOL_GPL(dw_dma_filter); + ++static int dwc_verify_p_buswidth(struct dma_chan *chan) ++{ ++ struct dw_dma_chan *dwc = to_dw_dma_chan(chan); ++ struct dw_dma *dw = to_dw_dma(chan->device); ++ u32 reg_width, max_width; ++ ++ if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV) ++ reg_width = dwc->dma_sconfig.dst_addr_width; ++ else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM) ++ reg_width = dwc->dma_sconfig.src_addr_width; ++ else /* DMA_MEM_TO_MEM */ ++ return 0; ++ ++ max_width = dw->pdata->data_width[dwc->dws.p_master]; ++ ++ /* Fall-back to 1-byte transfer width if undefined */ ++ if (reg_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ++ reg_width = DMA_SLAVE_BUSWIDTH_1_BYTE; ++ else if (!is_power_of_2(reg_width) || reg_width > max_width) ++ return -EINVAL; ++ else /* bus width is valid */ ++ return 0; ++ ++ /* Update undefined addr width value */ ++ if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV) ++ dwc->dma_sconfig.dst_addr_width = reg_width; ++ else /* DMA_DEV_TO_MEM */ ++ dwc->dma_sconfig.src_addr_width = reg_width; ++ ++ return 0; ++} ++ ++static int dwc_verify_m_buswidth(struct dma_chan *chan) ++{ ++ struct dw_dma_chan *dwc = to_dw_dma_chan(chan); ++ struct dw_dma *dw = to_dw_dma(chan->device); ++ u32 reg_width, reg_burst, mem_width; ++ ++ mem_width = dw->pdata->data_width[dwc->dws.m_master]; ++ ++ /* ++ * It's possible to have a data portion locked in the DMA FIFO in case ++ * of the channel suspension. Subsequent channel disabling will cause ++ * that data silent loss. In order to prevent that maintain the src and ++ * dst transfer widths coherency by means of the relation: ++ * (CTLx.SRC_TR_WIDTH * CTLx.SRC_MSIZE >= CTLx.DST_TR_WIDTH) ++ * Look for the details in the commit message that brings this change. ++ * ++ * Note the DMA configs utilized in the calculations below must have ++ * been verified to have correct values by this method call. 
++ */
++ if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV) {
++ reg_width = dwc->dma_sconfig.dst_addr_width;
++ if (mem_width < reg_width)
++ return -EINVAL;
++
++ dwc->dma_sconfig.src_addr_width = mem_width;
++ } else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM) {
++ reg_width = dwc->dma_sconfig.src_addr_width;
++ reg_burst = rounddown_pow_of_two(dwc->dma_sconfig.src_maxburst);
++
++ dwc->dma_sconfig.dst_addr_width = min(mem_width, reg_width * reg_burst);
++ }
++
++ return 0;
++}
++
+ static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
+ {
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(chan->device);
++ int ret;
+
+ memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
+
+ dwc->dma_sconfig.src_maxburst =
+- clamp(dwc->dma_sconfig.src_maxburst, 0U, dwc->max_burst);
++ clamp(dwc->dma_sconfig.src_maxburst, 1U, dwc->max_burst);
+ dwc->dma_sconfig.dst_maxburst =
+- clamp(dwc->dma_sconfig.dst_maxburst, 0U, dwc->max_burst);
++ clamp(dwc->dma_sconfig.dst_maxburst, 1U, dwc->max_burst);
++
++ ret = dwc_verify_p_buswidth(chan);
++ if (ret)
++ return ret;
++
++ ret = dwc_verify_m_buswidth(chan);
++ if (ret)
++ return ret;
+
+ dw->encode_maxburst(dwc, &dwc->dma_sconfig.src_maxburst);
+ dw->encode_maxburst(dwc, &dwc->dma_sconfig.dst_maxburst);
+diff --git a/drivers/firmware/qcom_scm-smc.c b/drivers/firmware/qcom_scm-smc.c
+index 16cf88acfa8ee..0a2a2c794d0ed 100644
+--- a/drivers/firmware/qcom_scm-smc.c
++++ b/drivers/firmware/qcom_scm-smc.c
+@@ -71,7 +71,7 @@ int scm_get_wq_ctx(u32 *wq_ctx, u32 *flags, u32 *more_pending)
+ struct arm_smccc_res get_wq_res;
+ struct arm_smccc_args get_wq_ctx = {0};
+
+- get_wq_ctx.args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
++ get_wq_ctx.args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,
+ ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP,
+ SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_GET_WQ_CTX));
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+index cc74dd69acf2b..fa9f53b310793 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+@@ -28,6 +28,7 @@
+ #include <drm/drm_atomic_helper.h>
+ #include <drm/drm_blend.h>
+ #include <drm/drm_fourcc.h>
++#include <drm/drm_gem_framebuffer_helper.h>
+ #include <drm/drm_plane_helper.h>
+
+ #include "amdgpu.h"
+@@ -848,10 +849,14 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
+ }
+
+ afb = to_amdgpu_framebuffer(new_state->fb);
+- obj = new_state->fb->obj[0];
++ obj = drm_gem_fb_get_obj(new_state->fb, 0);
++ if (!obj) {
++ DRM_ERROR("Failed to get obj from framebuffer\n");
++ return -EINVAL;
++ }
++
+ rbo = gem_to_amdgpu_bo(obj);
+ adev = amdgpu_ttm_adev(rbo->tbo.bdev);
+-
+ r = amdgpu_bo_reserve(rbo, true);
+ if (r) {
+ dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index 1402e468aa90f..c9a9c758531bc 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -1841,8 +1841,9 @@ static int smu_bump_power_profile_mode(struct smu_context *smu,
+ }
+
+ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
+- enum amd_dpm_forced_level level,
+- bool skip_display_settings)
++ enum amd_dpm_forced_level level,
++ bool skip_display_settings,
++ bool force_update)
+ {
+ int ret = 0;
+ int index = 0;
+@@ -1871,7 +1872,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
+ }
+ }
+
+- if (smu_dpm_ctx->dpm_level != level) {
++ if (force_update 
|| smu_dpm_ctx->dpm_level != level) {
+ ret = smu_asic_set_performance_level(smu, level);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to set performance level!");
+@@ -1882,13 +1883,12 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
+ smu_dpm_ctx->dpm_level = level;
+ }
+
+- if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+- smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
++ if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
+ index = fls(smu->workload_mask);
+ index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+ workload[0] = smu->workload_setting[index];
+
+- if (smu->power_profile_mode != workload[0])
++ if (force_update || smu->power_profile_mode != workload[0])
+ smu_bump_power_profile_mode(smu, workload, 0);
+ }
+
+@@ -1909,11 +1909,13 @@ static int smu_handle_task(struct smu_context *smu,
+ ret = smu_pre_display_config_changed(smu);
+ if (ret)
+ return ret;
+- ret = smu_adjust_power_state_dynamic(smu, level, false);
++ ret = smu_adjust_power_state_dynamic(smu, level, false, false);
+ break;
+ case AMD_PP_TASK_COMPLETE_INIT:
++ ret = smu_adjust_power_state_dynamic(smu, level, true, true);
++ break;
+ case AMD_PP_TASK_READJUST_POWER_STATE:
+- ret = smu_adjust_power_state_dynamic(smu, level, true);
++ ret = smu_adjust_power_state_dynamic(smu, level, true, false);
+ break;
+ default:
+ break;
+@@ -1960,8 +1962,7 @@ static int smu_switch_power_profile(void *handle,
+ workload[0] = smu->workload_setting[index];
+ }
+
+- if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+- smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
++ if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
+ smu_bump_power_profile_mode(smu, workload, 0);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+index 717d624e9a052..890a66a2361f4 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+@@ -27,6 +27,8 @@
+ **************************************************************************/
+
+ #include "vmwgfx_drv.h"
++
++#include "vmwgfx_bo.h"
+ #include <linux/highmem.h>
+
+ /*
+@@ -420,13 +422,105 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
+ return 0;
+ }
+
++static void *map_external(struct vmw_bo *bo, struct iosys_map *map)
++{
++ struct vmw_private *vmw =
++ container_of(bo->tbo.bdev, struct vmw_private, bdev);
++ void *ptr = NULL;
++ int ret;
++
++ if (bo->tbo.base.import_attach) {
++ ret = dma_buf_vmap(bo->tbo.base.dma_buf, map);
++ if (ret) {
++ drm_dbg_driver(&vmw->drm,
++ "Wasn't able to map external bo!\n");
++ goto out;
++ }
++ ptr = map->vaddr;
++ } else {
++ ptr = vmw_bo_map_and_cache(bo);
++ }
++
++out:
++ return ptr;
++}
++
++static void unmap_external(struct vmw_bo *bo, struct iosys_map *map)
++{
++ if (bo->tbo.base.import_attach)
++ dma_buf_vunmap(bo->tbo.base.dma_buf, map);
++ else
++ vmw_bo_unmap(bo);
++}
++
++static int vmw_external_bo_copy(struct vmw_bo *dst, u32 dst_offset,
++ u32 dst_stride, struct vmw_bo *src,
++ u32 src_offset, u32 src_stride,
++ u32 width_in_bytes, u32 height,
++ struct vmw_diff_cpy *diff)
++{
++ struct vmw_private *vmw =
++ container_of(dst->tbo.bdev, struct vmw_private, bdev);
++ size_t dst_size = dst->tbo.resource->size;
++ size_t src_size = src->tbo.resource->size;
++ struct iosys_map dst_map = {0};
++ struct iosys_map src_map = {0};
++ int ret, i;
++ int x_in_bytes;
++ u8 *vsrc;
++ u8 *vdst;
++
++ vsrc = map_external(src, &src_map);
++ if 
(!vsrc) { ++ drm_dbg_driver(&vmw->drm, "Wasn't able to map src\n"); ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ vdst = map_external(dst, &dst_map); ++ if (!vdst) { ++ drm_dbg_driver(&vmw->drm, "Wasn't able to map dst\n"); ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ vsrc += src_offset; ++ vdst += dst_offset; ++ if (src_stride == dst_stride) { ++ dst_size -= dst_offset; ++ src_size -= src_offset; ++ memcpy(vdst, vsrc, ++ min(dst_stride * height, min(dst_size, src_size))); ++ } else { ++ WARN_ON(dst_stride < width_in_bytes); ++ for (i = 0; i < height; ++i) { ++ memcpy(vdst, vsrc, width_in_bytes); ++ vsrc += src_stride; ++ vdst += dst_stride; ++ } ++ } ++ ++ x_in_bytes = (dst_offset % dst_stride); ++ diff->rect.x1 = x_in_bytes / diff->cpp; ++ diff->rect.y1 = ((dst_offset - x_in_bytes) / dst_stride); ++ diff->rect.x2 = diff->rect.x1 + width_in_bytes / diff->cpp; ++ diff->rect.y2 = diff->rect.y1 + height; ++ ++ ret = 0; ++out: ++ unmap_external(src, &src_map); ++ unmap_external(dst, &dst_map); ++ ++ return ret; ++} ++ + /** + * vmw_bo_cpu_blit - in-kernel cpu blit. + * +- * @dst: Destination buffer object. ++ * @vmw_dst: Destination buffer object. + * @dst_offset: Destination offset of blit start in bytes. + * @dst_stride: Destination stride in bytes. +- * @src: Source buffer object. ++ * @vmw_src: Source buffer object. + * @src_offset: Source offset of blit start in bytes. + * @src_stride: Source stride in bytes. + * @w: Width of blit. +@@ -444,13 +538,15 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d, + * Neither of the buffer objects may be placed in PCI memory + * (Fixed memory in TTM terminology) when using this function. + */ +-int vmw_bo_cpu_blit(struct ttm_buffer_object *dst, ++int vmw_bo_cpu_blit(struct vmw_bo *vmw_dst, + u32 dst_offset, u32 dst_stride, +- struct ttm_buffer_object *src, ++ struct vmw_bo *vmw_src, + u32 src_offset, u32 src_stride, + u32 w, u32 h, + struct vmw_diff_cpy *diff) + { ++ struct ttm_buffer_object *src = &vmw_src->tbo; ++ struct ttm_buffer_object *dst = &vmw_dst->tbo; + struct ttm_operation_ctx ctx = { + .interruptible = false, + .no_wait_gpu = false +@@ -460,6 +556,11 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst, + int ret = 0; + struct page **dst_pages = NULL; + struct page **src_pages = NULL; ++ bool src_external = (src->ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0; ++ bool dst_external = (dst->ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0; ++ ++ if (WARN_ON(dst == src)) ++ return -EINVAL; + + /* Buffer objects need to be either pinned or reserved: */ + if (!(dst->pin_count)) +@@ -479,6 +580,11 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst, + return ret; + } + ++ if (src_external || dst_external) ++ return vmw_external_bo_copy(vmw_dst, dst_offset, dst_stride, ++ vmw_src, src_offset, src_stride, ++ w, h, diff); ++ + if (!src->ttm->pages && src->ttm->sg) { + src_pages = kvmalloc_array(src->ttm->num_pages, + sizeof(struct page *), GFP_KERNEL); +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +index 13423c7b0cbdb..ac3d7ff3f5bb9 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +@@ -1355,9 +1355,9 @@ void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, + + void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n); + +-int vmw_bo_cpu_blit(struct ttm_buffer_object *dst, ++int vmw_bo_cpu_blit(struct vmw_bo *dst, + u32 dst_offset, u32 dst_stride, +- struct ttm_buffer_object *src, ++ struct vmw_bo *src, + u32 
src_offset, u32 src_stride, + u32 w, u32 h, + struct vmw_diff_cpy *diff); +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +index cb03c589ab226..b22ae25db4e17 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +@@ -497,7 +497,7 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty) + container_of(dirty->unit, typeof(*stdu), base); + s32 width, height; + s32 src_pitch, dst_pitch; +- struct ttm_buffer_object *src_bo, *dst_bo; ++ struct vmw_bo *src_bo, *dst_bo; + u32 src_offset, dst_offset; + struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(stdu->cpp); + +@@ -512,11 +512,11 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty) + + /* Assume we are blitting from Guest (bo) to Host (display_srf) */ + src_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp; +- src_bo = &stdu->display_srf->res.guest_memory_bo->tbo; ++ src_bo = stdu->display_srf->res.guest_memory_bo; + src_offset = ddirty->top * src_pitch + ddirty->left * stdu->cpp; + + dst_pitch = ddirty->pitch; +- dst_bo = &ddirty->buf->tbo; ++ dst_bo = ddirty->buf; + dst_offset = ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp; + + (void) vmw_bo_cpu_blit(dst_bo, dst_offset, dst_pitch, +@@ -1136,7 +1136,7 @@ vmw_stdu_bo_populate_update_cpu(struct vmw_du_update_plane *update, void *cmd, + struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(0); + struct vmw_stdu_update_gb_image *cmd_img = cmd; + struct vmw_stdu_update *cmd_update; +- struct ttm_buffer_object *src_bo, *dst_bo; ++ struct vmw_bo *src_bo, *dst_bo; + u32 src_offset, dst_offset; + s32 src_pitch, dst_pitch; + s32 width, height; +@@ -1150,11 +1150,11 @@ vmw_stdu_bo_populate_update_cpu(struct vmw_du_update_plane *update, void *cmd, + + diff.cpp = stdu->cpp; + +- dst_bo = &stdu->display_srf->res.guest_memory_bo->tbo; ++ dst_bo = stdu->display_srf->res.guest_memory_bo; + dst_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp; + dst_offset = bb->y1 * dst_pitch + bb->x1 * stdu->cpp; + +- src_bo = &vfbbo->buffer->tbo; ++ src_bo = vfbbo->buffer; + src_pitch = update->vfb->base.pitches[0]; + src_offset = bo_update->fb_top * src_pitch + bo_update->fb_left * + stdu->cpp; +diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c +index f5eb97726d1bb..2da969fc89900 100644 +--- a/drivers/iommu/dma-iommu.c ++++ b/drivers/iommu/dma-iommu.c +@@ -1614,7 +1614,7 @@ static const struct dma_map_ops iommu_dma_ops = { + .flags = DMA_F_PCI_P2PDMA_SUPPORTED, + .alloc = iommu_dma_alloc, + .free = iommu_dma_free, +- .alloc_pages_op = dma_common_alloc_pages, ++ .alloc_pages = dma_common_alloc_pages, + .free_pages = dma_common_free_pages, + .alloc_noncontiguous = iommu_dma_alloc_noncontiguous, + .free_noncontiguous = iommu_dma_free_noncontiguous, +diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c +index 75f244a3e12df..06ffc683b28fe 100644 +--- a/drivers/iommu/io-pgtable-arm-v7s.c ++++ b/drivers/iommu/io-pgtable-arm-v7s.c +@@ -552,9 +552,8 @@ static int arm_v7s_map_pages(struct io_pgtable_ops *ops, unsigned long iova, + paddr >= (1ULL << data->iop.cfg.oas))) + return -ERANGE; + +- /* If no access, then nothing to do */ + if (!(prot & (IOMMU_READ | IOMMU_WRITE))) +- return 0; ++ return -EINVAL; + + while (pgcount--) { + ret = __arm_v7s_map(data, iova, paddr, pgsize, prot, 1, data->pgd, +diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c +index 72dcdd468cf30..934dc97f5df9e 100644 +--- 
a/drivers/iommu/io-pgtable-arm.c ++++ b/drivers/iommu/io-pgtable-arm.c +@@ -480,9 +480,8 @@ static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova, + if (WARN_ON(iaext || paddr >> cfg->oas)) + return -ERANGE; + +- /* If no access, then nothing to do */ + if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE))) +- return 0; ++ return -EINVAL; + + prot = arm_lpae_prot_to_pte(data, iommu_prot); + ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl, +diff --git a/drivers/iommu/io-pgtable-dart.c b/drivers/iommu/io-pgtable-dart.c +index 74b1ef2b96bee..10811e0b773d3 100644 +--- a/drivers/iommu/io-pgtable-dart.c ++++ b/drivers/iommu/io-pgtable-dart.c +@@ -250,9 +250,8 @@ static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova, + if (WARN_ON(paddr >> cfg->oas)) + return -ERANGE; + +- /* If no access, then nothing to do */ + if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE))) +- return 0; ++ return -EINVAL; + + tbl = dart_get_table(data, iova); + +diff --git a/drivers/iommu/iommufd/ioas.c b/drivers/iommu/iommufd/ioas.c +index d5624577f79f1..0407e2b758ef4 100644 +--- a/drivers/iommu/iommufd/ioas.c ++++ b/drivers/iommu/iommufd/ioas.c +@@ -213,6 +213,10 @@ int iommufd_ioas_map(struct iommufd_ucmd *ucmd) + if (cmd->iova >= ULONG_MAX || cmd->length >= ULONG_MAX) + return -EOVERFLOW; + ++ if (!(cmd->flags & ++ (IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE))) ++ return -EINVAL; ++ + ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id); + if (IS_ERR(ioas)) + return PTR_ERR(ioas); +@@ -253,6 +257,10 @@ int iommufd_ioas_copy(struct iommufd_ucmd *ucmd) + cmd->dst_iova >= ULONG_MAX) + return -EOVERFLOW; + ++ if (!(cmd->flags & ++ (IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE))) ++ return -EINVAL; ++ + src_ioas = iommufd_get_ioas(ucmd->ictx, cmd->src_ioas_id); + if (IS_ERR(src_ioas)) + return PTR_ERR(src_ioas); +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c +index 566b02ca78261..53a7b53618d94 100644 +--- a/drivers/net/bonding/bond_main.c ++++ b/drivers/net/bonding/bond_main.c +@@ -427,6 +427,8 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs, + struct netlink_ext_ack *extack) + { + struct net_device *bond_dev = xs->xso.dev; ++ struct net_device *real_dev; ++ netdevice_tracker tracker; + struct bond_ipsec *ipsec; + struct bonding *bond; + struct slave *slave; +@@ -438,74 +440,80 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs, + rcu_read_lock(); + bond = netdev_priv(bond_dev); + slave = rcu_dereference(bond->curr_active_slave); +- if (!slave) { +- rcu_read_unlock(); +- return -ENODEV; ++ real_dev = slave ? 
slave->dev : NULL; ++ netdev_hold(real_dev, &tracker, GFP_ATOMIC); ++ rcu_read_unlock(); ++ if (!real_dev) { ++ err = -ENODEV; ++ goto out; + } + +- if (!slave->dev->xfrmdev_ops || +- !slave->dev->xfrmdev_ops->xdo_dev_state_add || +- netif_is_bond_master(slave->dev)) { ++ if (!real_dev->xfrmdev_ops || ++ !real_dev->xfrmdev_ops->xdo_dev_state_add || ++ netif_is_bond_master(real_dev)) { + NL_SET_ERR_MSG_MOD(extack, "Slave does not support ipsec offload"); +- rcu_read_unlock(); +- return -EINVAL; ++ err = -EINVAL; ++ goto out; + } + +- ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC); ++ ipsec = kmalloc(sizeof(*ipsec), GFP_KERNEL); + if (!ipsec) { +- rcu_read_unlock(); +- return -ENOMEM; ++ err = -ENOMEM; ++ goto out; + } +- xs->xso.real_dev = slave->dev; + +- err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs, extack); ++ xs->xso.real_dev = real_dev; ++ err = real_dev->xfrmdev_ops->xdo_dev_state_add(xs, extack); + if (!err) { + ipsec->xs = xs; + INIT_LIST_HEAD(&ipsec->list); +- spin_lock_bh(&bond->ipsec_lock); ++ mutex_lock(&bond->ipsec_lock); + list_add(&ipsec->list, &bond->ipsec_list); +- spin_unlock_bh(&bond->ipsec_lock); ++ mutex_unlock(&bond->ipsec_lock); + } else { + kfree(ipsec); + } +- rcu_read_unlock(); ++out: ++ netdev_put(real_dev, &tracker); + return err; + } + + static void bond_ipsec_add_sa_all(struct bonding *bond) + { + struct net_device *bond_dev = bond->dev; ++ struct net_device *real_dev; + struct bond_ipsec *ipsec; + struct slave *slave; + +- rcu_read_lock(); +- slave = rcu_dereference(bond->curr_active_slave); +- if (!slave) +- goto out; ++ slave = rtnl_dereference(bond->curr_active_slave); ++ real_dev = slave ? slave->dev : NULL; ++ if (!real_dev) ++ return; + +- if (!slave->dev->xfrmdev_ops || +- !slave->dev->xfrmdev_ops->xdo_dev_state_add || +- netif_is_bond_master(slave->dev)) { +- spin_lock_bh(&bond->ipsec_lock); ++ mutex_lock(&bond->ipsec_lock); ++ if (!real_dev->xfrmdev_ops || ++ !real_dev->xfrmdev_ops->xdo_dev_state_add || ++ netif_is_bond_master(real_dev)) { + if (!list_empty(&bond->ipsec_list)) +- slave_warn(bond_dev, slave->dev, ++ slave_warn(bond_dev, real_dev, + "%s: no slave xdo_dev_state_add\n", + __func__); +- spin_unlock_bh(&bond->ipsec_lock); + goto out; + } + +- spin_lock_bh(&bond->ipsec_lock); + list_for_each_entry(ipsec, &bond->ipsec_list, list) { +- ipsec->xs->xso.real_dev = slave->dev; +- if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs, NULL)) { +- slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__); ++ /* If new state is added before ipsec_lock acquired */ ++ if (ipsec->xs->xso.real_dev == real_dev) ++ continue; ++ ++ ipsec->xs->xso.real_dev = real_dev; ++ if (real_dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs, NULL)) { ++ slave_warn(bond_dev, real_dev, "%s: failed to add SA\n", __func__); + ipsec->xs->xso.real_dev = NULL; + } + } +- spin_unlock_bh(&bond->ipsec_lock); + out: +- rcu_read_unlock(); ++ mutex_unlock(&bond->ipsec_lock); + } + + /** +@@ -515,6 +523,8 @@ static void bond_ipsec_add_sa_all(struct bonding *bond) + static void bond_ipsec_del_sa(struct xfrm_state *xs) + { + struct net_device *bond_dev = xs->xso.dev; ++ struct net_device *real_dev; ++ netdevice_tracker tracker; + struct bond_ipsec *ipsec; + struct bonding *bond; + struct slave *slave; +@@ -525,6 +535,9 @@ static void bond_ipsec_del_sa(struct xfrm_state *xs) + rcu_read_lock(); + bond = netdev_priv(bond_dev); + slave = rcu_dereference(bond->curr_active_slave); ++ real_dev = slave ? 
slave->dev : NULL; ++ netdev_hold(real_dev, &tracker, GFP_ATOMIC); ++ rcu_read_unlock(); + + if (!slave) + goto out; +@@ -532,18 +545,19 @@ static void bond_ipsec_del_sa(struct xfrm_state *xs) + if (!xs->xso.real_dev) + goto out; + +- WARN_ON(xs->xso.real_dev != slave->dev); ++ WARN_ON(xs->xso.real_dev != real_dev); + +- if (!slave->dev->xfrmdev_ops || +- !slave->dev->xfrmdev_ops->xdo_dev_state_delete || +- netif_is_bond_master(slave->dev)) { +- slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__); ++ if (!real_dev->xfrmdev_ops || ++ !real_dev->xfrmdev_ops->xdo_dev_state_delete || ++ netif_is_bond_master(real_dev)) { ++ slave_warn(bond_dev, real_dev, "%s: no slave xdo_dev_state_delete\n", __func__); + goto out; + } + +- slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs); ++ real_dev->xfrmdev_ops->xdo_dev_state_delete(xs); + out: +- spin_lock_bh(&bond->ipsec_lock); ++ netdev_put(real_dev, &tracker); ++ mutex_lock(&bond->ipsec_lock); + list_for_each_entry(ipsec, &bond->ipsec_list, list) { + if (ipsec->xs == xs) { + list_del(&ipsec->list); +@@ -551,40 +565,72 @@ static void bond_ipsec_del_sa(struct xfrm_state *xs) + break; + } + } +- spin_unlock_bh(&bond->ipsec_lock); +- rcu_read_unlock(); ++ mutex_unlock(&bond->ipsec_lock); + } + + static void bond_ipsec_del_sa_all(struct bonding *bond) + { + struct net_device *bond_dev = bond->dev; ++ struct net_device *real_dev; + struct bond_ipsec *ipsec; + struct slave *slave; + +- rcu_read_lock(); +- slave = rcu_dereference(bond->curr_active_slave); +- if (!slave) { +- rcu_read_unlock(); ++ slave = rtnl_dereference(bond->curr_active_slave); ++ real_dev = slave ? slave->dev : NULL; ++ if (!real_dev) + return; +- } + +- spin_lock_bh(&bond->ipsec_lock); ++ mutex_lock(&bond->ipsec_lock); + list_for_each_entry(ipsec, &bond->ipsec_list, list) { + if (!ipsec->xs->xso.real_dev) + continue; + +- if (!slave->dev->xfrmdev_ops || +- !slave->dev->xfrmdev_ops->xdo_dev_state_delete || +- netif_is_bond_master(slave->dev)) { +- slave_warn(bond_dev, slave->dev, ++ if (!real_dev->xfrmdev_ops || ++ !real_dev->xfrmdev_ops->xdo_dev_state_delete || ++ netif_is_bond_master(real_dev)) { ++ slave_warn(bond_dev, real_dev, + "%s: no slave xdo_dev_state_delete\n", + __func__); + } else { +- slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs); ++ real_dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs); ++ if (real_dev->xfrmdev_ops->xdo_dev_state_free) ++ real_dev->xfrmdev_ops->xdo_dev_state_free(ipsec->xs); + } + } +- spin_unlock_bh(&bond->ipsec_lock); ++ mutex_unlock(&bond->ipsec_lock); ++} ++ ++static void bond_ipsec_free_sa(struct xfrm_state *xs) ++{ ++ struct net_device *bond_dev = xs->xso.dev; ++ struct net_device *real_dev; ++ netdevice_tracker tracker; ++ struct bonding *bond; ++ struct slave *slave; ++ ++ if (!bond_dev) ++ return; ++ ++ rcu_read_lock(); ++ bond = netdev_priv(bond_dev); ++ slave = rcu_dereference(bond->curr_active_slave); ++ real_dev = slave ? 
slave->dev : NULL; ++ netdev_hold(real_dev, &tracker, GFP_ATOMIC); + rcu_read_unlock(); ++ ++ if (!slave) ++ goto out; ++ ++ if (!xs->xso.real_dev) ++ goto out; ++ ++ WARN_ON(xs->xso.real_dev != real_dev); ++ ++ if (real_dev && real_dev->xfrmdev_ops && ++ real_dev->xfrmdev_ops->xdo_dev_state_free) ++ real_dev->xfrmdev_ops->xdo_dev_state_free(xs); ++out: ++ netdev_put(real_dev, &tracker); + } + + /** +@@ -627,6 +673,7 @@ static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) + static const struct xfrmdev_ops bond_xfrmdev_ops = { + .xdo_dev_state_add = bond_ipsec_add_sa, + .xdo_dev_state_delete = bond_ipsec_del_sa, ++ .xdo_dev_state_free = bond_ipsec_free_sa, + .xdo_dev_offload_ok = bond_ipsec_offload_ok, + }; + #endif /* CONFIG_XFRM_OFFLOAD */ +@@ -5897,7 +5944,7 @@ void bond_setup(struct net_device *bond_dev) + /* set up xfrm device ops (only supported in active-backup right now) */ + bond_dev->xfrmdev_ops = &bond_xfrmdev_ops; + INIT_LIST_HEAD(&bond->ipsec_list); +- spin_lock_init(&bond->ipsec_lock); ++ mutex_init(&bond->ipsec_lock); + #endif /* CONFIG_XFRM_OFFLOAD */ + + /* don't acquire bond device's netif_tx_lock when transmitting */ +@@ -5946,6 +5993,10 @@ static void bond_uninit(struct net_device *bond_dev) + __bond_release_one(bond_dev, slave->dev, true, true); + netdev_info(bond_dev, "Released all slaves\n"); + ++#ifdef CONFIG_XFRM_OFFLOAD ++ mutex_destroy(&bond->ipsec_lock); ++#endif /* CONFIG_XFRM_OFFLOAD */ ++ + bond_set_slave_arr(bond, NULL, NULL); + + list_del(&bond->bond_list); +diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c +index 01fed4fe84185..236daa0535ba0 100644 +--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c ++++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c +@@ -51,9 +51,33 @@ static int mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx, + return 0; + } + ++static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq, ++ struct hwc_work_request *req) ++{ ++ struct device *dev = hwc_rxq->hwc->dev; ++ struct gdma_sge *sge; ++ int err; ++ ++ sge = &req->sge; ++ sge->address = (u64)req->buf_sge_addr; ++ sge->mem_key = hwc_rxq->msg_buf->gpa_mkey; ++ sge->size = req->buf_len; ++ ++ memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request)); ++ req->wqe_req.sgl = sge; ++ req->wqe_req.num_sge = 1; ++ req->wqe_req.client_data_unit = 0; ++ ++ err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL); ++ if (err) ++ dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err); ++ return err; ++} ++ + static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len, +- const struct gdma_resp_hdr *resp_msg) ++ struct hwc_work_request *rx_req) + { ++ const struct gdma_resp_hdr *resp_msg = rx_req->buf_va; + struct hwc_caller_ctx *ctx; + int err; + +@@ -61,6 +85,7 @@ static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len, + hwc->inflight_msg_res.map)) { + dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n", + resp_msg->response.hwc_msg_id); ++ mana_hwc_post_rx_wqe(hwc->rxq, rx_req); + return; + } + +@@ -74,30 +99,13 @@ static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len, + memcpy(ctx->output_buf, resp_msg, resp_len); + out: + ctx->error = err; +- complete(&ctx->comp_event); +-} +- +-static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq, +- struct hwc_work_request *req) +-{ +- struct device *dev = hwc_rxq->hwc->dev; +- struct gdma_sge *sge; +- int err; +- +- sge = &req->sge; +- sge->address = 
(u64)req->buf_sge_addr; +- sge->mem_key = hwc_rxq->msg_buf->gpa_mkey; +- sge->size = req->buf_len; + +- memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request)); +- req->wqe_req.sgl = sge; +- req->wqe_req.num_sge = 1; +- req->wqe_req.client_data_unit = 0; ++ /* Must post rx wqe before complete(), otherwise the next rx may ++ * hit no_wqe error. ++ */ ++ mana_hwc_post_rx_wqe(hwc->rxq, rx_req); + +- err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL); +- if (err) +- dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err); +- return err; ++ complete(&ctx->comp_event); + } + + static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self, +@@ -234,14 +242,12 @@ static void mana_hwc_rx_event_handler(void *ctx, u32 gdma_rxq_id, + return; + } + +- mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp); ++ mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, rx_req); + +- /* Do no longer use 'resp', because the buffer is posted to the HW +- * in the below mana_hwc_post_rx_wqe(). ++ /* Can no longer use 'resp', because the buffer is posted to the HW ++ * in mana_hwc_handle_resp() above. + */ + resp = NULL; +- +- mana_hwc_post_rx_wqe(hwc_rxq, rx_req); + } + + static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id, +diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c +index 931b65591f4d1..9b0b22b65cb25 100644 +--- a/drivers/net/gtp.c ++++ b/drivers/net/gtp.c +@@ -1220,7 +1220,7 @@ static struct sock *gtp_encap_enable_socket(int fd, int type, + sock = sockfd_lookup(fd, &err); + if (!sock) { + pr_debug("gtp socket fd=%d not found\n", fd); +- return NULL; ++ return ERR_PTR(err); + } + + sk = sock->sk; +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +index 2cda1dcfd059a..9943e2d21a8f5 100644 +--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c ++++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +@@ -867,22 +867,25 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt) + entry = &wifi_pkg->package.elements[entry_idx]; + entry_idx++; + if (entry->type != ACPI_TYPE_INTEGER || +- entry->integer.value > num_profiles) { ++ entry->integer.value > num_profiles || ++ entry->integer.value < ++ rev_data[idx].min_profiles) { + ret = -EINVAL; + goto out_free; + } +- num_profiles = entry->integer.value; + + /* +- * this also validates >= min_profiles since we +- * otherwise wouldn't have gotten the data when +- * looking up in ACPI ++ * Check to see if we received package count ++ * same as max # of profiles + */ + if (wifi_pkg->package.count != + hdr_size + profile_size * num_profiles) { + ret = -EINVAL; + goto out_free; + } ++ ++ /* Number of valid profiles */ ++ num_profiles = entry->integer.value; + } + goto read_table; + } +diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c +index 6f01b7573b23c..b7ead0cd00450 100644 +--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c ++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c +@@ -4362,11 +4362,27 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) + if (ISSUPP_ADHOC_ENABLED(adapter->fw_cap_info)) + wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); + +- wiphy->bands[NL80211_BAND_2GHZ] = &mwifiex_band_2ghz; +- if (adapter->config_bands & BAND_A) +- wiphy->bands[NL80211_BAND_5GHZ] = &mwifiex_band_5ghz; +- else ++ wiphy->bands[NL80211_BAND_2GHZ] = devm_kmemdup(adapter->dev, ++ &mwifiex_band_2ghz, ++ sizeof(mwifiex_band_2ghz), ++ GFP_KERNEL); ++ if (!wiphy->bands[NL80211_BAND_2GHZ]) { ++ ret 
= -ENOMEM; ++ goto err; ++ } ++ ++ if (adapter->config_bands & BAND_A) { ++ wiphy->bands[NL80211_BAND_5GHZ] = devm_kmemdup(adapter->dev, ++ &mwifiex_band_5ghz, ++ sizeof(mwifiex_band_5ghz), ++ GFP_KERNEL); ++ if (!wiphy->bands[NL80211_BAND_5GHZ]) { ++ ret = -ENOMEM; ++ goto err; ++ } ++ } else { + wiphy->bands[NL80211_BAND_5GHZ] = NULL; ++ } + + if (adapter->drcs_enabled && ISSUPP_DRCS_ENABLED(adapter->fw_cap_info)) + wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_drcs; +@@ -4460,8 +4476,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) + if (ret < 0) { + mwifiex_dbg(adapter, ERROR, + "%s: wiphy_register failed: %d\n", __func__, ret); +- wiphy_free(wiphy); +- return ret; ++ goto err; + } + + if (!adapter->regd) { +@@ -4503,4 +4518,9 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) + + adapter->wiphy = wiphy; + return ret; ++ ++err: ++ wiphy_free(wiphy); ++ ++ return ret; + } +diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c +index 871667650dbef..048a552e9da1d 100644 +--- a/drivers/net/wireless/silabs/wfx/sta.c ++++ b/drivers/net/wireless/silabs/wfx/sta.c +@@ -370,8 +370,11 @@ static int wfx_set_mfp_ap(struct wfx_vif *wvif) + + ptr = (u16 *)cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset, + skb->len - ieoffset); +- if (unlikely(!ptr)) ++ if (!ptr) { ++ /* No RSN IE is fine in open networks */ ++ ret = 0; + goto free_skb; ++ } + + ptr += pairwise_cipher_suite_count_offset; + if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb))) +diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c +index b19c39dcfbd93..e2bc67300a915 100644 +--- a/drivers/nfc/pn533/pn533.c ++++ b/drivers/nfc/pn533/pn533.c +@@ -1723,6 +1723,11 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev, + } + + pn533_poll_create_mod_list(dev, im_protocols, tm_protocols); ++ if (!dev->poll_mod_count) { ++ nfc_err(dev->dev, ++ "Poll mod list is empty\n"); ++ return -EINVAL; ++ } + + /* Do not always start polling from the same modulation */ + get_random_bytes(&rand_mod, sizeof(rand_mod)); +diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c +index feef537257d05..9ce0d20a6c581 100644 +--- a/drivers/parisc/ccio-dma.c ++++ b/drivers/parisc/ccio-dma.c +@@ -1022,7 +1022,7 @@ static const struct dma_map_ops ccio_ops = { + .map_sg = ccio_map_sg, + .unmap_sg = ccio_unmap_sg, + .get_sgtable = dma_common_get_sgtable, +- .alloc_pages_op = dma_common_alloc_pages, ++ .alloc_pages = dma_common_alloc_pages, + .free_pages = dma_common_free_pages, + }; + +diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c +index 6f5b919280ff0..05e7103d1d407 100644 +--- a/drivers/parisc/sba_iommu.c ++++ b/drivers/parisc/sba_iommu.c +@@ -1090,7 +1090,7 @@ static const struct dma_map_ops sba_ops = { + .map_sg = sba_map_sg, + .unmap_sg = sba_unmap_sg, + .get_sgtable = dma_common_get_sgtable, +- .alloc_pages_op = dma_common_alloc_pages, ++ .alloc_pages = dma_common_alloc_pages, + .free_pages = dma_common_free_pages, + }; + +diff --git a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c +index 0b9a59d5b8f02..adc6394626ce8 100644 +--- a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c ++++ b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c +@@ -176,7 +176,7 @@ static void imx8m_get_phy_tuning_data(struct imx8mq_usb_phy *imx_phy) + imx_phy->comp_dis_tune = + phy_comp_dis_tune_from_property(imx_phy->comp_dis_tune); + +- if (device_property_read_u32(dev, "fsl,pcs-tx-deemph-3p5db-attenuation-db", ++ if 
(device_property_read_u32(dev, "fsl,phy-pcs-tx-deemph-3p5db-attenuation-db", + &imx_phy->pcs_tx_deemph_3p5db)) + imx_phy->pcs_tx_deemph_3p5db = PHY_TUNE_DEFAULT; + else +diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c +index 0cb5088e460b5..8c8b1ca31e4c4 100644 +--- a/drivers/phy/xilinx/phy-zynqmp.c ++++ b/drivers/phy/xilinx/phy-zynqmp.c +@@ -166,6 +166,24 @@ + /* Timeout values */ + #define TIMEOUT_US 1000 + ++/* Lane 0/1/2/3 offset */ ++#define DIG_8(n) ((0x4000 * (n)) + 0x1074) ++#define ILL13(n) ((0x4000 * (n)) + 0x1994) ++#define DIG_10(n) ((0x4000 * (n)) + 0x107c) ++#define RST_DLY(n) ((0x4000 * (n)) + 0x19a4) ++#define BYP_15(n) ((0x4000 * (n)) + 0x1038) ++#define BYP_12(n) ((0x4000 * (n)) + 0x102c) ++#define MISC3(n) ((0x4000 * (n)) + 0x19ac) ++#define EQ11(n) ((0x4000 * (n)) + 0x1978) ++ ++static u32 save_reg_address[] = { ++ /* Lane 0/1/2/3 Register */ ++ DIG_8(0), ILL13(0), DIG_10(0), RST_DLY(0), BYP_15(0), BYP_12(0), MISC3(0), EQ11(0), ++ DIG_8(1), ILL13(1), DIG_10(1), RST_DLY(1), BYP_15(1), BYP_12(1), MISC3(1), EQ11(1), ++ DIG_8(2), ILL13(2), DIG_10(2), RST_DLY(2), BYP_15(2), BYP_12(2), MISC3(2), EQ11(2), ++ DIG_8(3), ILL13(3), DIG_10(3), RST_DLY(3), BYP_15(3), BYP_12(3), MISC3(3), EQ11(3), ++}; ++ + struct xpsgtr_dev; + + /** +@@ -214,6 +232,7 @@ struct xpsgtr_phy { + * @tx_term_fix: fix for GT issue + * @saved_icm_cfg0: stored value of ICM CFG0 register + * @saved_icm_cfg1: stored value of ICM CFG1 register ++ * @saved_regs: registers to be saved/restored during suspend/resume + */ + struct xpsgtr_dev { + struct device *dev; +@@ -226,6 +245,7 @@ struct xpsgtr_dev { + bool tx_term_fix; + unsigned int saved_icm_cfg0; + unsigned int saved_icm_cfg1; ++ u32 *saved_regs; + }; + + /* +@@ -299,6 +319,32 @@ static inline void xpsgtr_clr_set_phy(struct xpsgtr_phy *gtr_phy, + writel((readl(addr) & ~clr) | set, addr); + } + ++/** ++ * xpsgtr_save_lane_regs - Saves registers on suspend ++ * @gtr_dev: pointer to phy controller context structure ++ */ ++static void xpsgtr_save_lane_regs(struct xpsgtr_dev *gtr_dev) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(save_reg_address); i++) ++ gtr_dev->saved_regs[i] = xpsgtr_read(gtr_dev, ++ save_reg_address[i]); ++} ++ ++/** ++ * xpsgtr_restore_lane_regs - Restores registers on resume ++ * @gtr_dev: pointer to phy controller context structure ++ */ ++static void xpsgtr_restore_lane_regs(struct xpsgtr_dev *gtr_dev) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(save_reg_address); i++) ++ xpsgtr_write(gtr_dev, save_reg_address[i], ++ gtr_dev->saved_regs[i]); ++} ++ + /* + * Hardware Configuration + */ +@@ -839,6 +885,8 @@ static int xpsgtr_runtime_suspend(struct device *dev) + gtr_dev->saved_icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0); + gtr_dev->saved_icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1); + ++ xpsgtr_save_lane_regs(gtr_dev); ++ + return 0; + } + +@@ -849,6 +897,8 @@ static int xpsgtr_runtime_resume(struct device *dev) + unsigned int i; + bool skip_phy_init; + ++ xpsgtr_restore_lane_regs(gtr_dev); ++ + icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0); + icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1); + +@@ -994,6 +1044,12 @@ static int xpsgtr_probe(struct platform_device *pdev) + return ret; + } + ++ gtr_dev->saved_regs = devm_kmalloc(gtr_dev->dev, ++ sizeof(save_reg_address), ++ GFP_KERNEL); ++ if (!gtr_dev->saved_regs) ++ return -ENOMEM; ++ + return 0; + } + +diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c +index b7921b59eb7b1..54301fbba524a 100644 +--- 
a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c ++++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c +@@ -709,32 +709,35 @@ static int mtk_pinconf_bias_set_rsel(struct mtk_pinctrl *hw, + { + int err, rsel_val; + +- if (!pullup && arg == MTK_DISABLE) +- return 0; +- + if (hw->rsel_si_unit) { + /* find pin rsel_index from pin_rsel array*/ + err = mtk_hw_pin_rsel_lookup(hw, desc, pullup, arg, &rsel_val); + if (err) +- goto out; ++ return err; + } else { +- if (arg < MTK_PULL_SET_RSEL_000 || +- arg > MTK_PULL_SET_RSEL_111) { +- err = -EINVAL; +- goto out; +- } ++ if (arg < MTK_PULL_SET_RSEL_000 || arg > MTK_PULL_SET_RSEL_111) ++ return -EINVAL; + + rsel_val = arg - MTK_PULL_SET_RSEL_000; + } + +- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_RSEL, rsel_val); +- if (err) +- goto out; ++ return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_RSEL, rsel_val); ++} + +- err = mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, MTK_ENABLE); ++static int mtk_pinconf_bias_set_pu_pd_rsel(struct mtk_pinctrl *hw, ++ const struct mtk_pin_desc *desc, ++ u32 pullup, u32 arg) ++{ ++ u32 enable = arg == MTK_DISABLE ? MTK_DISABLE : MTK_ENABLE; ++ int err; + +-out: +- return err; ++ if (arg != MTK_DISABLE) { ++ err = mtk_pinconf_bias_set_rsel(hw, desc, pullup, arg); ++ if (err) ++ return err; ++ } ++ ++ return mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, enable); + } + + int mtk_pinconf_bias_set_combo(struct mtk_pinctrl *hw, +@@ -750,22 +753,22 @@ int mtk_pinconf_bias_set_combo(struct mtk_pinctrl *hw, + try_all_type = MTK_PULL_TYPE_MASK; + + if (try_all_type & MTK_PULL_RSEL_TYPE) { +- err = mtk_pinconf_bias_set_rsel(hw, desc, pullup, arg); ++ err = mtk_pinconf_bias_set_pu_pd_rsel(hw, desc, pullup, arg); + if (!err) +- return err; ++ return 0; + } + + if (try_all_type & MTK_PULL_PU_PD_TYPE) { + err = mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, arg); + if (!err) +- return err; ++ return 0; + } + + if (try_all_type & MTK_PULL_PULLSEL_TYPE) { + err = mtk_pinconf_bias_set_pullsel_pullen(hw, desc, + pullup, arg); + if (!err) +- return err; ++ return 0; + } + + if (try_all_type & MTK_PULL_PUPD_R1R0_TYPE) +@@ -803,9 +806,9 @@ static int mtk_rsel_get_si_unit(struct mtk_pinctrl *hw, + return 0; + } + +-static int mtk_pinconf_bias_get_rsel(struct mtk_pinctrl *hw, +- const struct mtk_pin_desc *desc, +- u32 *pullup, u32 *enable) ++static int mtk_pinconf_bias_get_pu_pd_rsel(struct mtk_pinctrl *hw, ++ const struct mtk_pin_desc *desc, ++ u32 *pullup, u32 *enable) + { + int pu, pd, rsel, err; + +@@ -939,22 +942,22 @@ int mtk_pinconf_bias_get_combo(struct mtk_pinctrl *hw, + try_all_type = MTK_PULL_TYPE_MASK; + + if (try_all_type & MTK_PULL_RSEL_TYPE) { +- err = mtk_pinconf_bias_get_rsel(hw, desc, pullup, enable); ++ err = mtk_pinconf_bias_get_pu_pd_rsel(hw, desc, pullup, enable); + if (!err) +- return err; ++ return 0; + } + + if (try_all_type & MTK_PULL_PU_PD_TYPE) { + err = mtk_pinconf_bias_get_pu_pd(hw, desc, pullup, enable); + if (!err) +- return err; ++ return 0; + } + + if (try_all_type & MTK_PULL_PULLSEL_TYPE) { + err = mtk_pinconf_bias_get_pullsel_pullen(hw, desc, + pullup, enable); + if (!err) +- return err; ++ return 0; + } + + if (try_all_type & MTK_PULL_PUPD_R1R0_TYPE) +diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c +index b02eaba010d10..b5a02335617d7 100644 +--- a/drivers/pinctrl/pinctrl-rockchip.c ++++ b/drivers/pinctrl/pinctrl-rockchip.c +@@ -3802,7 +3802,7 @@ static struct rockchip_pin_bank rk3328_pin_banks[] = { + PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", 0, 0, 0, 0), + 
PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", 0, 0, 0, 0), + PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2", 0, +- 0, ++ IOMUX_WIDTH_2BIT, + IOMUX_WIDTH_3BIT, + 0), + PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3", +diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c +index 17e08f21756c3..bbe7cc894b1a1 100644 +--- a/drivers/pinctrl/pinctrl-single.c ++++ b/drivers/pinctrl/pinctrl-single.c +@@ -349,6 +349,8 @@ static int pcs_get_function(struct pinctrl_dev *pctldev, unsigned pin, + return -ENOTSUPP; + fselector = setting->func; + function = pinmux_generic_get_function(pctldev, fselector); ++ if (!function) ++ return -EINVAL; + *func = function->data; + if (!(*func)) { + dev_err(pcs->dev, "%s could not find function%i\n", +diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c +index b4f7995726894..a3fee55479d20 100644 +--- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c ++++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c +@@ -805,12 +805,12 @@ static int jh7110_irq_set_type(struct irq_data *d, unsigned int trigger) + case IRQ_TYPE_LEVEL_HIGH: + irq_type = 0; /* 0: level triggered */ + edge_both = 0; /* 0: ignored */ +- polarity = mask; /* 1: high level */ ++ polarity = 0; /* 0: high level */ + break; + case IRQ_TYPE_LEVEL_LOW: + irq_type = 0; /* 0: level triggered */ + edge_both = 0; /* 0: ignored */ +- polarity = 0; /* 0: low level */ ++ polarity = mask; /* 1: low level */ + break; + default: + return -EINVAL; +diff --git a/drivers/power/supply/qcom_battmgr.c b/drivers/power/supply/qcom_battmgr.c +index 44c6301f5f174..5b3681b9100c1 100644 +--- a/drivers/power/supply/qcom_battmgr.c ++++ b/drivers/power/supply/qcom_battmgr.c +@@ -1384,12 +1384,16 @@ static int qcom_battmgr_probe(struct auxiliary_device *adev, + "failed to register wireless charing power supply\n"); + } + +- battmgr->client = devm_pmic_glink_register_client(dev, +- PMIC_GLINK_OWNER_BATTMGR, +- qcom_battmgr_callback, +- qcom_battmgr_pdr_notify, +- battmgr); +- return PTR_ERR_OR_ZERO(battmgr->client); ++ battmgr->client = devm_pmic_glink_client_alloc(dev, PMIC_GLINK_OWNER_BATTMGR, ++ qcom_battmgr_callback, ++ qcom_battmgr_pdr_notify, ++ battmgr); ++ if (IS_ERR(battmgr->client)) ++ return PTR_ERR(battmgr->client); ++ ++ pmic_glink_client_register(battmgr->client); ++ ++ return 0; + } + + static const struct auxiliary_device_id qcom_battmgr_id_table[] = { +diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c +index bd99c5492b7d4..0f64b02443037 100644 +--- a/drivers/scsi/aacraid/comminit.c ++++ b/drivers/scsi/aacraid/comminit.c +@@ -642,6 +642,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) + + if (aac_comm_init(dev)<0){ + kfree(dev->queues); ++ dev->queues = NULL; + return NULL; + } + /* +@@ -649,6 +650,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) + */ + if (aac_fib_setup(dev) < 0) { + kfree(dev->queues); ++ dev->queues = NULL; + return NULL; + } + +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c +index 6dd43fff07ad5..4fce7cc32cb2c 100644 +--- a/drivers/scsi/sd.c ++++ b/drivers/scsi/sd.c +@@ -1690,13 +1690,15 @@ static int sd_sync_cache(struct scsi_disk *sdkp) + (sshdr.asc == 0x74 && sshdr.ascq == 0x71)) /* drive is password locked */ + /* this is no error here */ + return 0; ++ + /* +- * This drive doesn't support sync and there's not much +- * we can do because this is called during shutdown +- * or suspend so just return success so those operations +- * can proceed. 
++ * If a format is in progress or if the drive does not ++ * support sync, there is not much we can do because ++ * this is called during shutdown or suspend so just ++ * return success so those operations can proceed. + */ +- if (sshdr.sense_key == ILLEGAL_REQUEST) ++ if ((sshdr.asc == 0x04 && sshdr.ascq == 0x04) || ++ sshdr.sense_key == ILLEGAL_REQUEST) + return 0; + } + +diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c +index c2d0e8fb7141a..ab2418d2fe43a 100644 +--- a/drivers/soc/qcom/cmd-db.c ++++ b/drivers/soc/qcom/cmd-db.c +@@ -354,7 +354,7 @@ static int cmd_db_dev_probe(struct platform_device *pdev) + return -EINVAL; + } + +- cmd_db_header = memremap(rmem->base, rmem->size, MEMREMAP_WB); ++ cmd_db_header = memremap(rmem->base, rmem->size, MEMREMAP_WC); + if (!cmd_db_header) { + ret = -ENOMEM; + cmd_db_header = NULL; +diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c +index 71d261ac8aa45..7e50eeb4cc6f2 100644 +--- a/drivers/soc/qcom/pmic_glink.c ++++ b/drivers/soc/qcom/pmic_glink.c +@@ -69,15 +69,14 @@ static void _devm_pmic_glink_release_client(struct device *dev, void *res) + spin_unlock_irqrestore(&pg->client_lock, flags); + } + +-struct pmic_glink_client *devm_pmic_glink_register_client(struct device *dev, +- unsigned int id, +- void (*cb)(const void *, size_t, void *), +- void (*pdr)(void *, int), +- void *priv) ++struct pmic_glink_client *devm_pmic_glink_client_alloc(struct device *dev, ++ unsigned int id, ++ void (*cb)(const void *, size_t, void *), ++ void (*pdr)(void *, int), ++ void *priv) + { + struct pmic_glink_client *client; + struct pmic_glink *pg = dev_get_drvdata(dev->parent); +- unsigned long flags; + + client = devres_alloc(_devm_pmic_glink_release_client, sizeof(*client), GFP_KERNEL); + if (!client) +@@ -88,6 +87,18 @@ struct pmic_glink_client *devm_pmic_glink_register_client(struct device *dev, + client->cb = cb; + client->pdr_notify = pdr; + client->priv = priv; ++ INIT_LIST_HEAD(&client->node); ++ ++ devres_add(dev, client); ++ ++ return client; ++} ++EXPORT_SYMBOL_GPL(devm_pmic_glink_client_alloc); ++ ++void pmic_glink_client_register(struct pmic_glink_client *client) ++{ ++ struct pmic_glink *pg = client->pg; ++ unsigned long flags; + + mutex_lock(&pg->state_lock); + spin_lock_irqsave(&pg->client_lock, flags); +@@ -98,11 +109,8 @@ struct pmic_glink_client *devm_pmic_glink_register_client(struct device *dev, + spin_unlock_irqrestore(&pg->client_lock, flags); + mutex_unlock(&pg->state_lock); + +- devres_add(dev, client); +- +- return client; + } +-EXPORT_SYMBOL_GPL(devm_pmic_glink_register_client); ++EXPORT_SYMBOL_GPL(pmic_glink_client_register); + + int pmic_glink_send(struct pmic_glink_client *client, void *data, size_t len) + { +@@ -178,7 +186,7 @@ static void pmic_glink_state_notify_clients(struct pmic_glink *pg) + if (pg->pdr_state == SERVREG_SERVICE_STATE_UP && pg->ept) + new_state = SERVREG_SERVICE_STATE_UP; + } else { +- if (pg->pdr_state == SERVREG_SERVICE_STATE_UP && pg->ept) ++ if (pg->pdr_state == SERVREG_SERVICE_STATE_DOWN || !pg->ept) + new_state = SERVREG_SERVICE_STATE_DOWN; + } + +diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c +index a35df66bb07b5..60b0d79efb0f1 100644 +--- a/drivers/soc/qcom/pmic_glink_altmode.c ++++ b/drivers/soc/qcom/pmic_glink_altmode.c +@@ -529,12 +529,17 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev, + return ret; + } + +- altmode->client = devm_pmic_glink_register_client(dev, +- altmode->owner_id, +- 
pmic_glink_altmode_callback, +- pmic_glink_altmode_pdr_notify, +- altmode); +- return PTR_ERR_OR_ZERO(altmode->client); ++ altmode->client = devm_pmic_glink_client_alloc(dev, ++ altmode->owner_id, ++ pmic_glink_altmode_callback, ++ pmic_glink_altmode_pdr_notify, ++ altmode); ++ if (IS_ERR(altmode->client)) ++ return PTR_ERR(altmode->client); ++ ++ pmic_glink_client_register(altmode->client); ++ ++ return 0; + } + + static const struct auxiliary_device_id pmic_glink_altmode_id_table[] = { +diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c +index 68d54887992d9..cf69d5c415fbf 100644 +--- a/drivers/soundwire/stream.c ++++ b/drivers/soundwire/stream.c +@@ -1286,18 +1286,18 @@ struct sdw_dpn_prop *sdw_get_slave_dpn_prop(struct sdw_slave *slave, + unsigned int port_num) + { + struct sdw_dpn_prop *dpn_prop; +- u8 num_ports; ++ unsigned long mask; + int i; + + if (direction == SDW_DATA_DIR_TX) { +- num_ports = hweight32(slave->prop.source_ports); ++ mask = slave->prop.source_ports; + dpn_prop = slave->prop.src_dpn_prop; + } else { +- num_ports = hweight32(slave->prop.sink_ports); ++ mask = slave->prop.sink_ports; + dpn_prop = slave->prop.sink_dpn_prop; + } + +- for (i = 0; i < num_ports; i++) { ++ for_each_set_bit(i, &mask, 32) { + if (dpn_prop[i].num == port_num) + return &dpn_prop[i]; + } +diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c +index d5174f11b91c2..d8dfcd49695d3 100644 +--- a/drivers/thermal/thermal_of.c ++++ b/drivers/thermal/thermal_of.c +@@ -123,7 +123,7 @@ static int thermal_of_populate_trip(struct device_node *np, + static struct thermal_trip *thermal_of_trips_init(struct device_node *np, int *ntrips) + { + struct thermal_trip *tt; +- struct device_node *trips, *trip; ++ struct device_node *trips; + int ret, count; + + trips = of_get_child_by_name(np, "trips"); +@@ -148,7 +148,7 @@ static struct thermal_trip *thermal_of_trips_init(struct device_node *np, int *n + *ntrips = count; + + count = 0; +- for_each_child_of_node(trips, trip) { ++ for_each_child_of_node_scoped(trips, trip) { + ret = thermal_of_populate_trip(trip, &tt[count++]); + if (ret) + goto out_kfree; +@@ -182,14 +182,14 @@ static struct device_node *of_thermal_zone_find(struct device_node *sensor, int + * Search for each thermal zone, a defined sensor + * corresponding to the one passed as parameter + */ +- for_each_available_child_of_node(np, tz) { ++ for_each_available_child_of_node_scoped(np, child) { + + int count, i; + +- count = of_count_phandle_with_args(tz, "thermal-sensors", ++ count = of_count_phandle_with_args(child, "thermal-sensors", + "#thermal-sensor-cells"); + if (count <= 0) { +- pr_err("%pOFn: missing thermal sensor\n", tz); ++ pr_err("%pOFn: missing thermal sensor\n", child); + tz = ERR_PTR(-EINVAL); + goto out; + } +@@ -198,18 +198,19 @@ static struct device_node *of_thermal_zone_find(struct device_node *sensor, int + + int ret; + +- ret = of_parse_phandle_with_args(tz, "thermal-sensors", ++ ret = of_parse_phandle_with_args(child, "thermal-sensors", + "#thermal-sensor-cells", + i, &sensor_specs); + if (ret < 0) { +- pr_err("%pOFn: Failed to read thermal-sensors cells: %d\n", tz, ret); ++ pr_err("%pOFn: Failed to read thermal-sensors cells: %d\n", child, ret); + tz = ERR_PTR(ret); + goto out; + } + + if ((sensor == sensor_specs.np) && id == (sensor_specs.args_count ? 
+ sensor_specs.args[0] : 0)) { +- pr_debug("sensor %pOFn id=%d belongs to %pOFn\n", sensor, id, tz); ++ pr_debug("sensor %pOFn id=%d belongs to %pOFn\n", sensor, id, child); ++ tz = no_free_ptr(child); + goto out; + } + } +diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h +index e1b5801fdddf8..9a5577a772af6 100644 +--- a/drivers/usb/cdns3/cdnsp-gadget.h ++++ b/drivers/usb/cdns3/cdnsp-gadget.h +@@ -811,6 +811,7 @@ struct cdnsp_stream_info { + * generate Missed Service Error Event. + * Set skip flag when receive a Missed Service Error Event and + * process the missed tds on the endpoint ring. ++ * @wa1_nop_trb: hold pointer to NOP trb. + */ + struct cdnsp_ep { + struct usb_ep endpoint; +@@ -838,6 +839,8 @@ struct cdnsp_ep { + #define EP_UNCONFIGURED BIT(7) + + bool skip; ++ union cdnsp_trb *wa1_nop_trb; ++ + }; + + /** +diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c +index 275a6a2fa671e..e0e97a1386662 100644 +--- a/drivers/usb/cdns3/cdnsp-ring.c ++++ b/drivers/usb/cdns3/cdnsp-ring.c +@@ -402,7 +402,7 @@ static u64 cdnsp_get_hw_deq(struct cdnsp_device *pdev, + struct cdnsp_stream_ctx *st_ctx; + struct cdnsp_ep *pep; + +- pep = &pdev->eps[stream_id]; ++ pep = &pdev->eps[ep_index]; + + if (pep->ep_state & EP_HAS_STREAMS) { + st_ctx = &pep->stream_info.stream_ctx_array[stream_id]; +@@ -1904,6 +1904,23 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq) + if (ret) + return ret; + ++ /* ++ * workaround 1: STOP EP command on LINK TRB with TC bit set to 1 ++ * causes that internal cycle bit can have incorrect state after ++ * command complete. In consequence empty transfer ring can be ++ * incorrectly detected when EP is resumed. ++ * NOP TRB before LINK TRB avoid such scenario. STOP EP command is ++ * then on NOP TRB and internal cycle bit is not changed and have ++ * correct value. ++ */ ++ if (pep->wa1_nop_trb) { ++ field = le32_to_cpu(pep->wa1_nop_trb->trans_event.flags); ++ field ^= TRB_CYCLE; ++ ++ pep->wa1_nop_trb->trans_event.flags = cpu_to_le32(field); ++ pep->wa1_nop_trb = NULL; ++ } ++ + /* + * Don't give the first TRB to the hardware (by toggling the cycle bit) + * until we've finished creating all the other TRBs. 
The ring's cycle +@@ -1999,6 +2016,17 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq) + send_addr = addr; + } + ++ if (cdnsp_trb_is_link(ring->enqueue + 1)) { ++ field = TRB_TYPE(TRB_TR_NOOP) | TRB_IOC; ++ if (!ring->cycle_state) ++ field |= TRB_CYCLE; ++ ++ pep->wa1_nop_trb = ring->enqueue; ++ ++ cdnsp_queue_trb(pdev, ring, 0, 0x0, 0x0, ++ TRB_INTR_TARGET(0), field); ++ } ++ + cdnsp_check_trb_math(preq, enqd_len); + ret = cdnsp_giveback_first_trb(pdev, pep, preq->request.stream_id, + start_cycle, start_trb); +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c +index 0e7439dba8fe8..0c1b69d944ca4 100644 +--- a/drivers/usb/class/cdc-acm.c ++++ b/drivers/usb/class/cdc-acm.c +@@ -1761,6 +1761,9 @@ static const struct usb_device_id acm_ids[] = { + { USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */ + .driver_info = SINGLE_RX_URB, + }, ++ { USB_DEVICE(0x1901, 0x0006), /* GE Healthcare Patient Monitor UI Controller */ ++ .driver_info = DISABLE_ECHO, /* DISABLE ECHO in termios flag */ ++ }, + { USB_DEVICE(0x1965, 0x0018), /* Uniden UBC125XLT */ + .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ + }, +diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c +index f91f543ec636d..164edebc7f1f5 100644 +--- a/drivers/usb/core/sysfs.c ++++ b/drivers/usb/core/sysfs.c +@@ -668,6 +668,7 @@ static int add_power_attributes(struct device *dev) + + static void remove_power_attributes(struct device *dev) + { ++ sysfs_unmerge_group(&dev->kobj, &usb3_hardware_lpm_attr_group); + sysfs_unmerge_group(&dev->kobj, &usb2_hardware_lpm_attr_group); + sysfs_unmerge_group(&dev->kobj, &power_attr_group); + } +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c +index 5ce276cb39d0d..21ff6cbe5e49f 100644 +--- a/drivers/usb/dwc3/core.c ++++ b/drivers/usb/dwc3/core.c +@@ -553,9 +553,17 @@ int dwc3_event_buffers_setup(struct dwc3 *dwc) + void dwc3_event_buffers_cleanup(struct dwc3 *dwc) + { + struct dwc3_event_buffer *evt; ++ u32 reg; + + if (!dwc->ev_buf) + return; ++ /* ++ * Exynos platforms may not be able to access event buffer if the ++ * controller failed to halt on dwc3_core_exit(). 
++ */ ++ reg = dwc3_readl(dwc->regs, DWC3_DSTS); ++ if (!(reg & DWC3_DSTS_DEVCTRLHLT)) ++ return; + + evt = dwc->ev_buf; + +diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c +index d5c77db4daa92..2a11fc0ee84f1 100644 +--- a/drivers/usb/dwc3/dwc3-omap.c ++++ b/drivers/usb/dwc3/dwc3-omap.c +@@ -522,11 +522,13 @@ static int dwc3_omap_probe(struct platform_device *pdev) + if (ret) { + dev_err(dev, "failed to request IRQ #%d --> %d\n", + omap->irq, ret); +- goto err1; ++ goto err2; + } + dwc3_omap_enable_irqs(omap); + return 0; + ++err2: ++ of_platform_depopulate(dev); + err1: + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); +diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c +index 211360eee95a0..c8c7cd0c17969 100644 +--- a/drivers/usb/dwc3/dwc3-st.c ++++ b/drivers/usb/dwc3/dwc3-st.c +@@ -219,10 +219,8 @@ static int st_dwc3_probe(struct platform_device *pdev) + dwc3_data->regmap = regmap; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "syscfg-reg"); +- if (!res) { +- ret = -ENXIO; +- goto undo_platform_dev_alloc; +- } ++ if (!res) ++ return -ENXIO; + + dwc3_data->syscfg_reg_off = res->start; + +@@ -233,8 +231,7 @@ static int st_dwc3_probe(struct platform_device *pdev) + devm_reset_control_get_exclusive(dev, "powerdown"); + if (IS_ERR(dwc3_data->rstc_pwrdn)) { + dev_err(&pdev->dev, "could not get power controller\n"); +- ret = PTR_ERR(dwc3_data->rstc_pwrdn); +- goto undo_platform_dev_alloc; ++ return PTR_ERR(dwc3_data->rstc_pwrdn); + } + + /* Manage PowerDown */ +@@ -269,7 +266,7 @@ static int st_dwc3_probe(struct platform_device *pdev) + if (!child_pdev) { + dev_err(dev, "failed to find dwc3 core device\n"); + ret = -ENODEV; +- goto err_node_put; ++ goto depopulate; + } + + dwc3_data->dr_mode = usb_get_dr_mode(&child_pdev->dev); +@@ -285,6 +282,7 @@ static int st_dwc3_probe(struct platform_device *pdev) + ret = st_dwc3_drd_init(dwc3_data); + if (ret) { + dev_err(dev, "drd initialisation failed\n"); ++ of_platform_depopulate(dev); + goto undo_softreset; + } + +@@ -294,14 +292,14 @@ static int st_dwc3_probe(struct platform_device *pdev) + platform_set_drvdata(pdev, dwc3_data); + return 0; + ++depopulate: ++ of_platform_depopulate(dev); + err_node_put: + of_node_put(child); + undo_softreset: + reset_control_assert(dwc3_data->rstc_rst); + undo_powerdown: + reset_control_assert(dwc3_data->rstc_pwrdn); +-undo_platform_dev_alloc: +- platform_device_put(pdev); + return ret; + } + +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 311040f9b9352..176f38750ad58 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -619,6 +619,8 @@ static void option_instat_callback(struct urb *urb); + + /* MeiG Smart Technology products */ + #define MEIGSMART_VENDOR_ID 0x2dee ++/* MeiG Smart SRM825L based on Qualcomm 315 */ ++#define MEIGSMART_PRODUCT_SRM825L 0x4d22 + /* MeiG Smart SLM320 based on UNISOC UIS8910 */ + #define MEIGSMART_PRODUCT_SLM320 0x4d41 + +@@ -2366,6 +2368,9 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM320, 0xff, 0, 0) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x30) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x40) }, ++ { 
USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x60) }, + { } /* Terminating entry */ + }; + MODULE_DEVICE_TABLE(usb, option_ids); +diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c +index 7db9c382c354d..e053b6e99b9e4 100644 +--- a/drivers/usb/typec/tcpm/tcpm.c ++++ b/drivers/usb/typec/tcpm/tcpm.c +@@ -2403,7 +2403,7 @@ static int tcpm_register_source_caps(struct tcpm_port *port) + { + struct usb_power_delivery_desc desc = { port->negotiated_rev }; + struct usb_power_delivery_capabilities_desc caps = { }; +- struct usb_power_delivery_capabilities *cap; ++ struct usb_power_delivery_capabilities *cap = port->partner_source_caps; + + if (!port->partner_pd) + port->partner_pd = usb_power_delivery_register(NULL, &desc); +@@ -2413,6 +2413,11 @@ static int tcpm_register_source_caps(struct tcpm_port *port) + memcpy(caps.pdo, port->source_caps, sizeof(u32) * port->nr_source_caps); + caps.role = TYPEC_SOURCE; + ++ if (cap) { ++ usb_power_delivery_unregister_capabilities(cap); ++ port->partner_source_caps = NULL; ++ } ++ + cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps); + if (IS_ERR(cap)) + return PTR_ERR(cap); +@@ -2426,7 +2431,7 @@ static int tcpm_register_sink_caps(struct tcpm_port *port) + { + struct usb_power_delivery_desc desc = { port->negotiated_rev }; + struct usb_power_delivery_capabilities_desc caps = { }; +- struct usb_power_delivery_capabilities *cap = port->partner_source_caps; ++ struct usb_power_delivery_capabilities *cap; + + if (!port->partner_pd) + port->partner_pd = usb_power_delivery_register(NULL, &desc); +@@ -2436,11 +2441,6 @@ static int tcpm_register_sink_caps(struct tcpm_port *port) + memcpy(caps.pdo, port->sink_caps, sizeof(u32) * port->nr_sink_caps); + caps.role = TYPEC_SINK; + +- if (cap) { +- usb_power_delivery_unregister_capabilities(cap); +- port->partner_source_caps = NULL; +- } +- + cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps); + if (IS_ERR(cap)) + return PTR_ERR(cap); +diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c +index 7625e28e12a80..94f2df02f06ee 100644 +--- a/drivers/usb/typec/ucsi/ucsi_glink.c ++++ b/drivers/usb/typec/ucsi/ucsi_glink.c +@@ -376,12 +376,16 @@ static int pmic_glink_ucsi_probe(struct auxiliary_device *adev, + "failed to acquire orientation-switch\n"); + } + +- ucsi->client = devm_pmic_glink_register_client(dev, +- PMIC_GLINK_OWNER_USBC, +- pmic_glink_ucsi_callback, +- pmic_glink_ucsi_pdr_notify, +- ucsi); +- return PTR_ERR_OR_ZERO(ucsi->client); ++ ucsi->client = devm_pmic_glink_client_alloc(dev, PMIC_GLINK_OWNER_USBC, ++ pmic_glink_ucsi_callback, ++ pmic_glink_ucsi_pdr_notify, ++ ucsi); ++ if (IS_ERR(ucsi->client)) ++ return PTR_ERR(ucsi->client); ++ ++ pmic_glink_client_register(ucsi->client); ++ ++ return 0; + } + + static void pmic_glink_ucsi_remove(struct auxiliary_device *adev) +diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c +index 29257d2639dbf..76f6f26265a3b 100644 +--- a/drivers/xen/grant-dma-ops.c ++++ b/drivers/xen/grant-dma-ops.c +@@ -282,7 +282,7 @@ static int xen_grant_dma_supported(struct device *dev, u64 mask) + static const struct dma_map_ops xen_grant_dma_ops = { + .alloc = xen_grant_dma_alloc, + .free = xen_grant_dma_free, +- .alloc_pages_op = xen_grant_dma_alloc_pages, ++ .alloc_pages = xen_grant_dma_alloc_pages, + .free_pages = xen_grant_dma_free_pages, + .mmap = dma_common_mmap, + .get_sgtable = dma_common_get_sgtable, +diff --git 
a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c +index 1c4ef5111651d..0e6c6c25d154f 100644 +--- a/drivers/xen/swiotlb-xen.c ++++ b/drivers/xen/swiotlb-xen.c +@@ -403,7 +403,7 @@ const struct dma_map_ops xen_swiotlb_dma_ops = { + .dma_supported = xen_swiotlb_dma_supported, + .mmap = dma_common_mmap, + .get_sgtable = dma_common_get_sgtable, +- .alloc_pages_op = dma_common_alloc_pages, ++ .alloc_pages = dma_common_alloc_pages, + .free_pages = dma_common_free_pages, + .max_mapping_size = swiotlb_max_mapping_size, + }; +diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c +index e47eb248309f8..650972895652d 100644 +--- a/fs/btrfs/bio.c ++++ b/fs/btrfs/bio.c +@@ -646,7 +646,6 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num) + { + struct btrfs_inode *inode = bbio->inode; + struct btrfs_fs_info *fs_info = bbio->fs_info; +- struct btrfs_bio *orig_bbio = bbio; + struct bio *bio = &bbio->bio; + u64 logical = bio->bi_iter.bi_sector << SECTOR_SHIFT; + u64 length = bio->bi_iter.bi_size; +@@ -682,7 +681,7 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num) + bbio->saved_iter = bio->bi_iter; + ret = btrfs_lookup_bio_sums(bbio); + if (ret) +- goto fail_put_bio; ++ goto fail; + } + + if (btrfs_op(bio) == BTRFS_MAP_WRITE) { +@@ -704,13 +703,13 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num) + + ret = btrfs_bio_csum(bbio); + if (ret) +- goto fail_put_bio; ++ goto fail; + } else if (use_append || + (btrfs_is_zoned(fs_info) && inode && + inode->flags & BTRFS_INODE_NODATASUM)) { + ret = btrfs_alloc_dummy_sum(bbio); + if (ret) +- goto fail_put_bio; ++ goto fail; + } + } + +@@ -718,12 +717,23 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num) + done: + return map_length == length; + +-fail_put_bio: +- if (map_length < length) +- btrfs_cleanup_bio(bbio); + fail: + btrfs_bio_counter_dec(fs_info); +- btrfs_bio_end_io(orig_bbio, ret); ++ /* ++ * We have split the original bbio, now we have to end both the current ++ * @bbio and remaining one, as the remaining one will never be submitted. 
++ */ ++ if (map_length < length) { ++ struct btrfs_bio *remaining = bbio->private; ++ ++ ASSERT(bbio->bio.bi_pool == &btrfs_clone_bioset); ++ ASSERT(remaining); ++ ++ remaining->bio.bi_status = ret; ++ btrfs_orig_bbio_end_io(remaining); ++ } ++ bbio->bio.bi_status = ret; ++ btrfs_orig_bbio_end_io(bbio); + /* Do not submit another chunk */ + return true; + } +diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c +index efe84be65a440..74b82390fe847 100644 +--- a/fs/btrfs/qgroup.c ++++ b/fs/btrfs/qgroup.c +@@ -3762,6 +3762,8 @@ static int try_flush_qgroup(struct btrfs_root *root) + return 0; + } + ++ btrfs_run_delayed_iputs(root->fs_info); ++ btrfs_wait_on_delayed_iputs(root->fs_info); + ret = btrfs_start_delalloc_snapshot(root, true); + if (ret < 0) + goto out; +diff --git a/fs/overlayfs/params.c b/fs/overlayfs/params.c +index 488f920f79d28..0f3768cf9e871 100644 +--- a/fs/overlayfs/params.c ++++ b/fs/overlayfs/params.c +@@ -357,6 +357,8 @@ static void ovl_add_layer(struct fs_context *fc, enum ovl_opt layer, + case Opt_datadir_add: + ctx->nr_data++; + fallthrough; ++ case Opt_lowerdir: ++ fallthrough; + case Opt_lowerdir_add: + WARN_ON(ctx->nr >= ctx->capacity); + l = &ctx->lower[ctx->nr++]; +@@ -369,10 +371,9 @@ static void ovl_add_layer(struct fs_context *fc, enum ovl_opt layer, + } + } + +-static int ovl_parse_layer(struct fs_context *fc, struct fs_parameter *param, +- enum ovl_opt layer) ++static int ovl_parse_layer(struct fs_context *fc, const char *layer_name, enum ovl_opt layer) + { +- char *name = kstrdup(param->string, GFP_KERNEL); ++ char *name = kstrdup(layer_name, GFP_KERNEL); + bool upper = (layer == Opt_upperdir || layer == Opt_workdir); + struct path path; + int err; +@@ -380,7 +381,7 @@ static int ovl_parse_layer(struct fs_context *fc, struct fs_parameter *param, + if (!name) + return -ENOMEM; + +- if (upper) ++ if (upper || layer == Opt_lowerdir) + err = ovl_mount_dir(name, &path); + else + err = ovl_mount_dir_noesc(name, &path); +@@ -436,7 +437,6 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc) + { + int err; + struct ovl_fs_context *ctx = fc->fs_private; +- struct ovl_fs_context_layer *l; + char *dup = NULL, *iter; + ssize_t nr_lower, nr; + bool data_layer = false; +@@ -453,7 +453,7 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc) + return 0; + + if (*name == ':') { +- pr_err("cannot append lower layer"); ++ pr_err("cannot append lower layer\n"); + return -EINVAL; + } + +@@ -476,35 +476,11 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc) + goto out_err; + } + +- if (nr_lower > ctx->capacity) { +- err = -ENOMEM; +- l = krealloc_array(ctx->lower, nr_lower, sizeof(*ctx->lower), +- GFP_KERNEL_ACCOUNT); +- if (!l) +- goto out_err; +- +- ctx->lower = l; +- ctx->capacity = nr_lower; +- } +- + iter = dup; +- l = ctx->lower; +- for (nr = 0; nr < nr_lower; nr++, l++) { +- ctx->nr++; +- memset(l, 0, sizeof(*l)); +- +- err = ovl_mount_dir(iter, &l->path); ++ for (nr = 0; nr < nr_lower; nr++) { ++ err = ovl_parse_layer(fc, iter, Opt_lowerdir); + if (err) +- goto out_put; +- +- err = ovl_mount_dir_check(fc, &l->path, Opt_lowerdir, iter, false); +- if (err) +- goto out_put; +- +- err = -ENOMEM; +- l->name = kstrdup(iter, GFP_KERNEL_ACCOUNT); +- if (!l->name) +- goto out_put; ++ goto out_err; + + if (data_layer) + ctx->nr_data++; +@@ -521,8 +497,8 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc) + * there are no data layers. 
+ */ + if (ctx->nr_data > 0) { +- pr_err("regular lower layers cannot follow data lower layers"); +- goto out_put; ++ pr_err("regular lower layers cannot follow data lower layers\n"); ++ goto out_err; + } + + data_layer = false; +@@ -536,9 +512,6 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc) + kfree(dup); + return 0; + +-out_put: +- ovl_reset_lowerdirs(ctx); +- + out_err: + kfree(dup); + +@@ -586,7 +559,7 @@ static int ovl_parse_param(struct fs_context *fc, struct fs_parameter *param) + case Opt_datadir_add: + case Opt_upperdir: + case Opt_workdir: +- err = ovl_parse_layer(fc, param, opt); ++ err = ovl_parse_layer(fc, param->string, opt); + break; + case Opt_default_permissions: + config->default_permissions = true; +diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c +index 66cfce456263b..012d6ec12a691 100644 +--- a/fs/smb/client/smb2ops.c ++++ b/fs/smb/client/smb2ops.c +@@ -3251,6 +3251,7 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, + struct inode *inode = file_inode(file); + struct cifsFileInfo *cfile = file->private_data; + struct file_zero_data_information fsctl_buf; ++ unsigned long long end = offset + len, i_size, remote_i_size; + long rc; + unsigned int xid; + __u8 set_sparse = 1; +@@ -3282,6 +3283,27 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, + (char *)&fsctl_buf, + sizeof(struct file_zero_data_information), + CIFSMaxBufSize, NULL, NULL); ++ ++ if (rc) ++ goto unlock; ++ ++ /* If there's dirty data in the buffer that would extend the EOF if it ++ * were written, then we need to move the EOF marker over to the lower ++ * of the high end of the hole and the proposed EOF. The problem is ++ * that we locally hole-punch the tail of the dirty data, the proposed ++ * EOF update will end up in the wrong place. 
++ */ ++ i_size = i_size_read(inode); ++ remote_i_size = netfs_inode(inode)->remote_i_size; ++ if (end > remote_i_size && i_size > remote_i_size) { ++ unsigned long long extend_to = umin(end, i_size); ++ rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid, ++ cfile->fid.volatile_fid, cfile->pid, extend_to); ++ if (rc >= 0) ++ netfs_inode(inode)->remote_i_size = extend_to; ++ } ++ ++unlock: + filemap_invalidate_unlock(inode->i_mapping); + out: + inode_unlock(inode); +diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c +index 61df8a5c68242..bf45b8652e580 100644 +--- a/fs/smb/client/smb2pdu.c ++++ b/fs/smb/client/smb2pdu.c +@@ -4431,7 +4431,7 @@ smb2_new_read_req(void **buf, unsigned int *total_len, + * If we want to do a RDMA write, fill in and append + * smbd_buffer_descriptor_v1 to the end of read request + */ +- if (smb3_use_rdma_offload(io_parms)) { ++ if (rdata && smb3_use_rdma_offload(io_parms)) { + struct smbd_buffer_descriptor_v1 *v1; + bool need_invalidate = server->dialect == SMB30_PROT_ID; + +diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h +index 3a8a015fdd2ed..f2fc203fb8a1a 100644 +--- a/include/linux/dma-map-ops.h ++++ b/include/linux/dma-map-ops.h +@@ -28,7 +28,7 @@ struct dma_map_ops { + unsigned long attrs); + void (*free)(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle, unsigned long attrs); +- struct page *(*alloc_pages_op)(struct device *dev, size_t size, ++ struct page *(*alloc_pages)(struct device *dev, size_t size, + dma_addr_t *dma_handle, enum dma_data_direction dir, + gfp_t gfp); + void (*free_pages)(struct device *dev, size_t size, struct page *vaddr, +diff --git a/include/linux/of.h b/include/linux/of.h +index 6a9ddf20e79ab..024dda54b9c77 100644 +--- a/include/linux/of.h ++++ b/include/linux/of.h +@@ -13,6 +13,7 @@ + */ + #include + #include ++#include + #include + #include + #include +@@ -134,6 +135,7 @@ static inline struct device_node *of_node_get(struct device_node *node) + } + static inline void of_node_put(struct device_node *node) { } + #endif /* !CONFIG_OF_DYNAMIC */ ++DEFINE_FREE(device_node, struct device_node *, if (_T) of_node_put(_T)) + + /* Pointer for first entry in chain of all nodes. 
*/ + extern struct device_node *of_root; +@@ -1428,10 +1430,23 @@ static inline int of_property_read_s32(const struct device_node *np, + #define for_each_child_of_node(parent, child) \ + for (child = of_get_next_child(parent, NULL); child != NULL; \ + child = of_get_next_child(parent, child)) ++ ++#define for_each_child_of_node_scoped(parent, child) \ ++ for (struct device_node *child __free(device_node) = \ ++ of_get_next_child(parent, NULL); \ ++ child != NULL; \ ++ child = of_get_next_child(parent, child)) ++ + #define for_each_available_child_of_node(parent, child) \ + for (child = of_get_next_available_child(parent, NULL); child != NULL; \ + child = of_get_next_available_child(parent, child)) + ++#define for_each_available_child_of_node_scoped(parent, child) \ ++ for (struct device_node *child __free(device_node) = \ ++ of_get_next_available_child(parent, NULL); \ ++ child != NULL; \ ++ child = of_get_next_available_child(parent, child)) ++ + #define for_each_of_cpu_node(cpu) \ + for (cpu = of_get_next_cpu_node(NULL); cpu != NULL; \ + cpu = of_get_next_cpu_node(cpu)) +diff --git a/include/linux/soc/qcom/pmic_glink.h b/include/linux/soc/qcom/pmic_glink.h +index fd124aa18c81a..7cddf10277528 100644 +--- a/include/linux/soc/qcom/pmic_glink.h ++++ b/include/linux/soc/qcom/pmic_glink.h +@@ -23,10 +23,11 @@ struct pmic_glink_hdr { + + int pmic_glink_send(struct pmic_glink_client *client, void *data, size_t len); + +-struct pmic_glink_client *devm_pmic_glink_register_client(struct device *dev, +- unsigned int id, +- void (*cb)(const void *, size_t, void *), +- void (*pdr)(void *, int), +- void *priv); ++struct pmic_glink_client *devm_pmic_glink_client_alloc(struct device *dev, ++ unsigned int id, ++ void (*cb)(const void *, size_t, void *), ++ void (*pdr)(void *, int), ++ void *priv); ++void pmic_glink_client_register(struct pmic_glink_client *client); + + #endif +diff --git a/include/net/bonding.h b/include/net/bonding.h +index 5b8b1b644a2db..94594026a5c55 100644 +--- a/include/net/bonding.h ++++ b/include/net/bonding.h +@@ -258,7 +258,7 @@ struct bonding { + #ifdef CONFIG_XFRM_OFFLOAD + struct list_head ipsec_list; + /* protecting ipsec_list */ +- spinlock_t ipsec_lock; ++ struct mutex ipsec_lock; + #endif /* CONFIG_XFRM_OFFLOAD */ + struct bpf_prog *xdp_prog; + }; +diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h +index 4dabeb6c76d31..9f2ce4d05c265 100644 +--- a/include/net/busy_poll.h ++++ b/include/net/busy_poll.h +@@ -64,7 +64,7 @@ static inline bool sk_can_busy_loop(struct sock *sk) + static inline unsigned long busy_loop_current_time(void) + { + #ifdef CONFIG_NET_RX_BUSY_POLL +- return (unsigned long)(local_clock() >> 10); ++ return (unsigned long)(ktime_get_ns() >> 10); + #else + return 0; + #endif +diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h +index 60a7d0ce30804..fcf967286e37c 100644 +--- a/include/net/netfilter/nf_tables_ipv4.h ++++ b/include/net/netfilter/nf_tables_ipv4.h +@@ -19,7 +19,7 @@ static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt) + static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt) + { + struct iphdr *iph, _iph; +- u32 len, thoff; ++ u32 len, thoff, skb_len; + + iph = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb), + sizeof(*iph), &_iph); +@@ -30,8 +30,10 @@ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt) + return -1; + + len = iph_totlen(pkt->skb, iph); +- thoff = skb_network_offset(pkt->skb) + (iph->ihl * 4); +- if (pkt->skb->len < 
len) ++ thoff = iph->ihl * 4; ++ skb_len = pkt->skb->len - skb_network_offset(pkt->skb); ++ ++ if (skb_len < len) + return -1; + else if (len < thoff) + return -1; +@@ -40,7 +42,7 @@ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt) + + pkt->flags = NFT_PKTINFO_L4PROTO; + pkt->tprot = iph->protocol; +- pkt->thoff = thoff; ++ pkt->thoff = skb_network_offset(pkt->skb) + thoff; + pkt->fragoff = ntohs(iph->frag_off) & IP_OFFSET; + + return 0; +diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h +index 467d59b9e5334..a0633eeaec977 100644 +--- a/include/net/netfilter/nf_tables_ipv6.h ++++ b/include/net/netfilter/nf_tables_ipv6.h +@@ -31,8 +31,8 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt) + struct ipv6hdr *ip6h, _ip6h; + unsigned int thoff = 0; + unsigned short frag_off; ++ u32 pkt_len, skb_len; + int protohdr; +- u32 pkt_len; + + ip6h = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb), + sizeof(*ip6h), &_ip6h); +@@ -43,7 +43,8 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt) + return -1; + + pkt_len = ntohs(ip6h->payload_len); +- if (pkt_len + sizeof(*ip6h) > pkt->skb->len) ++ skb_len = pkt->skb->len - skb_network_offset(pkt->skb); ++ if (pkt_len + sizeof(*ip6h) > skb_len) + return -1; + + protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags); +diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c +index 2923f3b2dd2c7..f1d9f01b283d7 100644 +--- a/kernel/dma/mapping.c ++++ b/kernel/dma/mapping.c +@@ -570,9 +570,9 @@ static struct page *__dma_alloc_pages(struct device *dev, size_t size, + size = PAGE_ALIGN(size); + if (dma_alloc_direct(dev, ops)) + return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp); +- if (!ops->alloc_pages_op) ++ if (!ops->alloc_pages) + return NULL; +- return ops->alloc_pages_op(dev, size, dma_handle, dir, gfp); ++ return ops->alloc_pages(dev, size, dma_handle, dir, gfp); + } + + struct page *dma_alloc_pages(struct device *dev, size_t size, +diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h +index 02b727a54648f..3db42bae73f8e 100644 +--- a/kernel/trace/trace.h ++++ b/kernel/trace/trace.h +@@ -1555,6 +1555,29 @@ static inline void *event_file_data(struct file *filp) + extern struct mutex event_mutex; + extern struct list_head ftrace_events; + ++/* ++ * When the trace_event_file is the filp->i_private pointer, ++ * it must be taken under the event_mutex lock, and then checked ++ * if the EVENT_FILE_FL_FREED flag is set. If it is, then the ++ * data pointed to by the trace_event_file can not be trusted. ++ * ++ * Use the event_file_file() to access the trace_event_file from ++ * the filp the first time under the event_mutex and check for ++ * NULL. If it is needed to be retrieved again and the event_mutex ++ * is still held, then the event_file_data() can be used and it ++ * is guaranteed to be valid. 
++ */ ++static inline struct trace_event_file *event_file_file(struct file *filp) ++{ ++ struct trace_event_file *file; ++ ++ lockdep_assert_held(&event_mutex); ++ file = READ_ONCE(file_inode(filp)->i_private); ++ if (!file || file->flags & EVENT_FILE_FL_FREED) ++ return NULL; ++ return file; ++} ++ + extern const struct file_operations event_trigger_fops; + extern const struct file_operations event_hist_fops; + extern const struct file_operations event_hist_debug_fops; +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c +index 2ae0f2807438a..c68dc50c8becf 100644 +--- a/kernel/trace/trace_events.c ++++ b/kernel/trace/trace_events.c +@@ -1386,12 +1386,12 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt, + char buf[4] = "0"; + + mutex_lock(&event_mutex); +- file = event_file_data(filp); ++ file = event_file_file(filp); + if (likely(file)) + flags = file->flags; + mutex_unlock(&event_mutex); + +- if (!file || flags & EVENT_FILE_FL_FREED) ++ if (!file) + return -ENODEV; + + if (flags & EVENT_FILE_FL_ENABLED && +@@ -1428,8 +1428,8 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, + case 1: + ret = -ENODEV; + mutex_lock(&event_mutex); +- file = event_file_data(filp); +- if (likely(file && !(file->flags & EVENT_FILE_FL_FREED))) ++ file = event_file_file(filp); ++ if (likely(file)) + ret = ftrace_event_enable_disable(file, val); + mutex_unlock(&event_mutex); + break; +@@ -1538,7 +1538,8 @@ enum { + + static void *f_next(struct seq_file *m, void *v, loff_t *pos) + { +- struct trace_event_call *call = event_file_data(m->private); ++ struct trace_event_file *file = event_file_data(m->private); ++ struct trace_event_call *call = file->event_call; + struct list_head *common_head = &ftrace_common_fields; + struct list_head *head = trace_get_fields(call); + struct list_head *node = v; +@@ -1570,7 +1571,8 @@ static void *f_next(struct seq_file *m, void *v, loff_t *pos) + + static int f_show(struct seq_file *m, void *v) + { +- struct trace_event_call *call = event_file_data(m->private); ++ struct trace_event_file *file = event_file_data(m->private); ++ struct trace_event_call *call = file->event_call; + struct ftrace_event_field *field; + const char *array_descriptor; + +@@ -1625,12 +1627,14 @@ static int f_show(struct seq_file *m, void *v) + + static void *f_start(struct seq_file *m, loff_t *pos) + { ++ struct trace_event_file *file; + void *p = (void *)FORMAT_HEADER; + loff_t l = 0; + + /* ->stop() is called even if ->start() fails */ + mutex_lock(&event_mutex); +- if (!event_file_data(m->private)) ++ file = event_file_file(m->private); ++ if (!file) + return ERR_PTR(-ENODEV); + + while (l < *pos && p) +@@ -1704,8 +1708,8 @@ event_filter_read(struct file *filp, char __user *ubuf, size_t cnt, + trace_seq_init(s); + + mutex_lock(&event_mutex); +- file = event_file_data(filp); +- if (file && !(file->flags & EVENT_FILE_FL_FREED)) ++ file = event_file_file(filp); ++ if (file) + print_event_filter(file, s); + mutex_unlock(&event_mutex); + +@@ -1734,9 +1738,13 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, + return PTR_ERR(buf); + + mutex_lock(&event_mutex); +- file = event_file_data(filp); +- if (file) +- err = apply_event_filter(file, buf); ++ file = event_file_file(filp); ++ if (file) { ++ if (file->flags & EVENT_FILE_FL_FREED) ++ err = -ENODEV; ++ else ++ err = apply_event_filter(file, buf); ++ } + mutex_unlock(&event_mutex); + + kfree(buf); +@@ -2451,7 +2459,6 @@ static int event_callback(const char *name, 
umode_t *mode, void **data, + if (strcmp(name, "format") == 0) { + *mode = TRACE_MODE_READ; + *fops = &ftrace_event_format_fops; +- *data = call; + return 1; + } + +diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c +index 68aaf0bd7a78d..dd16faf0d1500 100644 +--- a/kernel/trace/trace_events_hist.c ++++ b/kernel/trace/trace_events_hist.c +@@ -5609,7 +5609,7 @@ static int hist_show(struct seq_file *m, void *v) + + mutex_lock(&event_mutex); + +- event_file = event_file_data(m->private); ++ event_file = event_file_file(m->private); + if (unlikely(!event_file)) { + ret = -ENODEV; + goto out_unlock; +@@ -5888,7 +5888,7 @@ static int hist_debug_show(struct seq_file *m, void *v) + + mutex_lock(&event_mutex); + +- event_file = event_file_data(m->private); ++ event_file = event_file_file(m->private); + if (unlikely(!event_file)) { + ret = -ENODEV; + goto out_unlock; +diff --git a/kernel/trace/trace_events_inject.c b/kernel/trace/trace_events_inject.c +index 8650562bdaa98..a8f076809db4d 100644 +--- a/kernel/trace/trace_events_inject.c ++++ b/kernel/trace/trace_events_inject.c +@@ -299,7 +299,7 @@ event_inject_write(struct file *filp, const char __user *ubuf, size_t cnt, + strim(buf); + + mutex_lock(&event_mutex); +- file = event_file_data(filp); ++ file = event_file_file(filp); + if (file) { + call = file->event_call; + size = parse_entry(buf, call, &entry); +diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c +index b33c3861fbbbf..76abc9a45f971 100644 +--- a/kernel/trace/trace_events_trigger.c ++++ b/kernel/trace/trace_events_trigger.c +@@ -159,7 +159,7 @@ static void *trigger_start(struct seq_file *m, loff_t *pos) + + /* ->stop() is called even if ->start() fails */ + mutex_lock(&event_mutex); +- event_file = event_file_data(m->private); ++ event_file = event_file_file(m->private); + if (unlikely(!event_file)) + return ERR_PTR(-ENODEV); + +@@ -213,7 +213,7 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file) + + mutex_lock(&event_mutex); + +- if (unlikely(!event_file_data(file))) { ++ if (unlikely(!event_file_file(file))) { + mutex_unlock(&event_mutex); + return -ENODEV; + } +@@ -293,7 +293,7 @@ static ssize_t event_trigger_regex_write(struct file *file, + strim(buf); + + mutex_lock(&event_mutex); +- event_file = event_file_data(file); ++ event_file = event_file_file(file); + if (unlikely(!event_file)) { + mutex_unlock(&event_mutex); + kfree(buf); +diff --git a/mm/truncate.c b/mm/truncate.c +index 8e3aa9e8618ed..70c09213bb920 100644 +--- a/mm/truncate.c ++++ b/mm/truncate.c +@@ -174,7 +174,7 @@ static void truncate_cleanup_folio(struct folio *folio) + if (folio_mapped(folio)) + unmap_mapping_folio(folio); + +- if (folio_has_private(folio)) ++ if (folio_needs_release(folio)) + folio_invalidate(folio, 0, folio_size(folio)); + + /* +@@ -235,7 +235,7 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end) + */ + folio_zero_range(folio, offset, length); + +- if (folio_has_private(folio)) ++ if (folio_needs_release(folio)) + folio_invalidate(folio, offset, length); + if (!folio_test_large(folio)) + return true; +diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c +index e660b3d661dae..1b56355c40eaf 100644 +--- a/net/bluetooth/hci_core.c ++++ b/net/bluetooth/hci_core.c +@@ -2394,10 +2394,16 @@ static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action, + /* To avoid a potential race with hci_unregister_dev. 
*/ + hci_dev_hold(hdev); + +- if (action == PM_SUSPEND_PREPARE) ++ switch (action) { ++ case PM_HIBERNATION_PREPARE: ++ case PM_SUSPEND_PREPARE: + ret = hci_suspend_dev(hdev); +- else if (action == PM_POST_SUSPEND) ++ break; ++ case PM_POST_HIBERNATION: ++ case PM_POST_SUSPEND: + ret = hci_resume_dev(hdev); ++ break; ++ } + + if (ret) + bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d", +diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c +index 5a9487af44e00..f7404bc679746 100644 +--- a/net/core/net-sysfs.c ++++ b/net/core/net-sysfs.c +@@ -216,7 +216,7 @@ static ssize_t speed_show(struct device *dev, + if (!rtnl_trylock()) + return restart_syscall(); + +- if (netif_running(netdev) && netif_device_present(netdev)) { ++ if (netif_running(netdev)) { + struct ethtool_link_ksettings cmd; + + if (!__ethtool_get_link_ksettings(netdev, &cmd)) +diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c +index 7cb23bcf8ef7a..4486cbe2faf0c 100644 +--- a/net/ethtool/ioctl.c ++++ b/net/ethtool/ioctl.c +@@ -438,6 +438,9 @@ int __ethtool_get_link_ksettings(struct net_device *dev, + if (!dev->ethtool_ops->get_link_ksettings) + return -EOPNOTSUPP; + ++ if (!netif_device_present(dev)) ++ return -ENODEV; ++ + memset(link_ksettings, 0, sizeof(*link_ksettings)); + return dev->ethtool_ops->get_link_ksettings(dev, link_ksettings); + } +diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c +index 3346c615f3efe..c42496bcb5e09 100644 +--- a/net/mptcp/pm.c ++++ b/net/mptcp/pm.c +@@ -227,7 +227,9 @@ void mptcp_pm_add_addr_received(const struct sock *ssk, + } else { + __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP); + } +- } else if (!READ_ONCE(pm->accept_addr)) { ++ /* id0 should not have a different address */ ++ } else if ((addr->id == 0 && !mptcp_pm_nl_is_init_remote_addr(msk, addr)) || ++ (addr->id > 0 && !READ_ONCE(pm->accept_addr))) { + mptcp_pm_announce_addr(msk, addr, true); + mptcp_pm_add_addr_send_ack(msk); + } else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) { +diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c +index 441af74621465..7dd10bacc8d28 100644 +--- a/net/mptcp/pm_netlink.c ++++ b/net/mptcp/pm_netlink.c +@@ -135,12 +135,15 @@ static bool lookup_subflow_by_daddr(const struct list_head *list, + { + struct mptcp_subflow_context *subflow; + struct mptcp_addr_info cur; +- struct sock_common *skc; + + list_for_each_entry(subflow, list, node) { +- skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow); ++ struct sock *ssk = mptcp_subflow_tcp_sock(subflow); ++ ++ if (!((1 << inet_sk_state_load(ssk)) & ++ (TCPF_ESTABLISHED | TCPF_SYN_SENT | TCPF_SYN_RECV))) ++ continue; + +- remote_address(skc, &cur); ++ remote_address((struct sock_common *)ssk, &cur); + if (mptcp_addresses_equal(&cur, daddr, daddr->port)) + return true; + } +@@ -593,6 +596,11 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk) + + __clear_bit(local.addr.id, msk->pm.id_avail_bitmap); + msk->pm.add_addr_signaled++; ++ ++ /* Special case for ID0: set the correct ID */ ++ if (local.addr.id == msk->mpc_endpoint_id) ++ local.addr.id = 0; ++ + mptcp_pm_announce_addr(msk, &local.addr, false); + mptcp_pm_nl_addr_send_ack(msk); + +@@ -615,8 +623,14 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk) + + fullmesh = !!(local.flags & MPTCP_PM_ADDR_FLAG_FULLMESH); + +- msk->pm.local_addr_used++; + __clear_bit(local.addr.id, msk->pm.id_avail_bitmap); ++ ++ /* Special case for ID0: set the correct ID */ ++ if (local.addr.id == 
msk->mpc_endpoint_id) ++ local.addr.id = 0; ++ else /* local_addr_used is not decr for ID 0 */ ++ msk->pm.local_addr_used++; ++ + nr = fill_remote_addresses_vec(msk, &local.addr, fullmesh, addrs); + if (nr == 0) + continue; +@@ -745,13 +759,24 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk) + spin_lock_bh(&msk->pm.lock); + + if (sf_created) { +- msk->pm.add_addr_accepted++; ++ /* add_addr_accepted is not decr for ID 0 */ ++ if (remote.id) ++ msk->pm.add_addr_accepted++; + if (msk->pm.add_addr_accepted >= add_addr_accept_max || + msk->pm.subflows >= subflows_max) + WRITE_ONCE(msk->pm.accept_addr, false); + } + } + ++bool mptcp_pm_nl_is_init_remote_addr(struct mptcp_sock *msk, ++ const struct mptcp_addr_info *remote) ++{ ++ struct mptcp_addr_info mpc_remote; ++ ++ remote_address((struct sock_common *)msk, &mpc_remote); ++ return mptcp_addresses_equal(&mpc_remote, remote, remote->port); ++} ++ + void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk) + { + struct mptcp_subflow_context *subflow; +@@ -763,9 +788,12 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk) + !mptcp_pm_should_rm_signal(msk)) + return; + +- subflow = list_first_entry_or_null(&msk->conn_list, typeof(*subflow), node); +- if (subflow) +- mptcp_pm_send_ack(msk, subflow, false, false); ++ mptcp_for_each_subflow(msk, subflow) { ++ if (__mptcp_subflow_active(subflow)) { ++ mptcp_pm_send_ack(msk, subflow, false, false); ++ break; ++ } ++ } + } + + int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk, +@@ -835,6 +863,8 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk, + int how = RCV_SHUTDOWN | SEND_SHUTDOWN; + u8 id = subflow_get_local_id(subflow); + ++ if (inet_sk_state_load(ssk) == TCP_CLOSE) ++ continue; + if (rm_type == MPTCP_MIB_RMADDR && remote_id != rm_id) + continue; + if (rm_type == MPTCP_MIB_RMSUBFLOW && !mptcp_local_id_match(msk, id, rm_id)) +@@ -1338,20 +1368,27 @@ static struct pm_nl_pernet *genl_info_pm_nl(struct genl_info *info) + return pm_nl_get_pernet(genl_info_net(info)); + } + +-static int mptcp_nl_add_subflow_or_signal_addr(struct net *net) ++static int mptcp_nl_add_subflow_or_signal_addr(struct net *net, ++ struct mptcp_addr_info *addr) + { + struct mptcp_sock *msk; + long s_slot = 0, s_num = 0; + + while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { + struct sock *sk = (struct sock *)msk; ++ struct mptcp_addr_info mpc_addr; + + if (!READ_ONCE(msk->fully_established) || + mptcp_pm_is_userspace(msk)) + goto next; + ++ /* if the endp linked to the init sf is re-added with a != ID */ ++ mptcp_local_address((struct sock_common *)msk, &mpc_addr); ++ + lock_sock(sk); + spin_lock_bh(&msk->pm.lock); ++ if (mptcp_addresses_equal(addr, &mpc_addr, addr->port)) ++ msk->mpc_endpoint_id = addr->id; + mptcp_pm_create_subflow_or_signal_addr(msk); + spin_unlock_bh(&msk->pm.lock); + release_sock(sk); +@@ -1424,7 +1461,7 @@ static int mptcp_nl_cmd_add_addr(struct sk_buff *skb, struct genl_info *info) + goto out_free; + } + +- mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk)); ++ mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk), &entry->addr); + return 0; + + out_free: +@@ -1540,6 +1577,8 @@ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net, + spin_unlock_bh(&msk->pm.lock); + } + ++ if (msk->mpc_endpoint_id == entry->addr.id) ++ msk->mpc_endpoint_id = 0; + release_sock(sk); + + next: +diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c +index c1d652a3f7a9b..0ae684624c820 100644 +--- a/net/mptcp/protocol.c ++++ 
b/net/mptcp/protocol.c +@@ -2289,7 +2289,7 @@ struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk) + continue; + } + +- if (subflow->backup) { ++ if (subflow->backup || subflow->request_bkup) { + if (!backup) + backup = ssk; + continue; +@@ -2496,8 +2496,11 @@ static void __mptcp_close_subflow(struct sock *sk) + + mptcp_for_each_subflow_safe(msk, subflow, tmp) { + struct sock *ssk = mptcp_subflow_tcp_sock(subflow); ++ int ssk_state = inet_sk_state_load(ssk); + +- if (inet_sk_state_load(ssk) != TCP_CLOSE) ++ if (ssk_state != TCP_CLOSE && ++ (ssk_state != TCP_CLOSE_WAIT || ++ inet_sk_state_load(sk) != TCP_ESTABLISHED)) + continue; + + /* 'subflow_data_ready' will re-sched once rx queue is empty */ +diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h +index 89dda24537d9e..20736f31dc534 100644 +--- a/net/mptcp/protocol.h ++++ b/net/mptcp/protocol.h +@@ -907,6 +907,8 @@ void mptcp_pm_add_addr_received(const struct sock *ssk, + void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk, + const struct mptcp_addr_info *addr); + void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk); ++bool mptcp_pm_nl_is_init_remote_addr(struct mptcp_sock *msk, ++ const struct mptcp_addr_info *remote); + void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk); + void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, + const struct mptcp_rm_list *rm_list); +diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c +index 927c2d5997dc7..81050aa55f7b9 100644 +--- a/net/mptcp/subflow.c ++++ b/net/mptcp/subflow.c +@@ -1226,12 +1226,16 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb, + /* sched mptcp worker to remove the subflow if no more data is pending */ + static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk) + { +- if (likely(ssk->sk_state != TCP_CLOSE)) ++ struct sock *sk = (struct sock *)msk; ++ ++ if (likely(ssk->sk_state != TCP_CLOSE && ++ (ssk->sk_state != TCP_CLOSE_WAIT || ++ inet_sk_state_load(sk) != TCP_ESTABLISHED))) + return; + + if (skb_queue_empty(&ssk->sk_receive_queue) && + !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) +- mptcp_schedule_work((struct sock *)msk); ++ mptcp_schedule_work(sk); + } + + static bool subflow_can_fallback(struct mptcp_subflow_context *subflow) +diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c +index 08fdf1251f46a..3649a4e1eb9de 100644 +--- a/net/sctp/sm_statefuns.c ++++ b/net/sctp/sm_statefuns.c +@@ -2259,12 +2259,6 @@ enum sctp_disposition sctp_sf_do_5_2_4_dupcook( + } + } + +- /* Update socket peer label if first association. */ +- if (security_sctp_assoc_request(new_asoc, chunk->head_skb ?: chunk->skb)) { +- sctp_association_free(new_asoc); +- return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); +- } +- + /* Set temp so that it won't be added into hashtable */ + new_asoc->temp = 1; + +@@ -2273,6 +2267,22 @@ enum sctp_disposition sctp_sf_do_5_2_4_dupcook( + */ + action = sctp_tietags_compare(new_asoc, asoc); + ++ /* In cases C and E the association doesn't enter the ESTABLISHED ++ * state, so there is no need to call security_sctp_assoc_request(). ++ */ ++ switch (action) { ++ case 'A': /* Association restart. */ ++ case 'B': /* Collision case B. */ ++ case 'D': /* Collision case D. */ ++ /* Update socket peer label if first association. 
*/ ++ if (security_sctp_assoc_request((struct sctp_association *)asoc, ++ chunk->head_skb ?: chunk->skb)) { ++ sctp_association_free(new_asoc); ++ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); ++ } ++ break; ++ } ++ + switch (action) { + case 'A': /* Association restart. */ + retval = sctp_sf_do_dupcook_a(net, ep, asoc, chunk, commands, +diff --git a/security/apparmor/policy_unpack_test.c b/security/apparmor/policy_unpack_test.c +index 5c9bde25e56df..2b8003eb4f463 100644 +--- a/security/apparmor/policy_unpack_test.c ++++ b/security/apparmor/policy_unpack_test.c +@@ -80,14 +80,14 @@ static struct aa_ext *build_aa_ext_struct(struct policy_unpack_fixture *puf, + *(buf + 1) = strlen(TEST_U32_NAME) + 1; + strscpy(buf + 3, TEST_U32_NAME, e->end - (void *)(buf + 3)); + *(buf + 3 + strlen(TEST_U32_NAME) + 1) = AA_U32; +- *((u32 *)(buf + 3 + strlen(TEST_U32_NAME) + 2)) = TEST_U32_DATA; ++ *((__le32 *)(buf + 3 + strlen(TEST_U32_NAME) + 2)) = cpu_to_le32(TEST_U32_DATA); + + buf = e->start + TEST_NAMED_U64_BUF_OFFSET; + *buf = AA_NAME; + *(buf + 1) = strlen(TEST_U64_NAME) + 1; + strscpy(buf + 3, TEST_U64_NAME, e->end - (void *)(buf + 3)); + *(buf + 3 + strlen(TEST_U64_NAME) + 1) = AA_U64; +- *((u64 *)(buf + 3 + strlen(TEST_U64_NAME) + 2)) = TEST_U64_DATA; ++ *((__le64 *)(buf + 3 + strlen(TEST_U64_NAME) + 2)) = cpu_to_le64(TEST_U64_DATA); + + buf = e->start + TEST_NAMED_BLOB_BUF_OFFSET; + *buf = AA_NAME; +@@ -103,7 +103,7 @@ static struct aa_ext *build_aa_ext_struct(struct policy_unpack_fixture *puf, + *(buf + 1) = strlen(TEST_ARRAY_NAME) + 1; + strscpy(buf + 3, TEST_ARRAY_NAME, e->end - (void *)(buf + 3)); + *(buf + 3 + strlen(TEST_ARRAY_NAME) + 1) = AA_ARRAY; +- *((u16 *)(buf + 3 + strlen(TEST_ARRAY_NAME) + 2)) = TEST_ARRAY_SIZE; ++ *((__le16 *)(buf + 3 + strlen(TEST_ARRAY_NAME) + 2)) = cpu_to_le16(TEST_ARRAY_SIZE); + + return e; + } +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c +index d32d16d75795a..d4a99d98ec774 100644 +--- a/security/selinux/hooks.c ++++ b/security/selinux/hooks.c +@@ -6553,8 +6553,8 @@ static int selinux_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen + */ + static int selinux_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen) + { +- return __vfs_setxattr_noperm(&nop_mnt_idmap, dentry, XATTR_NAME_SELINUX, +- ctx, ctxlen, 0); ++ return __vfs_setxattr_locked(&nop_mnt_idmap, dentry, XATTR_NAME_SELINUX, ++ ctx, ctxlen, 0, NULL); + } + + static int selinux_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen) +diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c +index 49d9da878ac61..6b92e09d3f780 100644 +--- a/security/smack/smack_lsm.c ++++ b/security/smack/smack_lsm.c +@@ -4772,8 +4772,8 @@ static int smack_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen) + + static int smack_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen) + { +- return __vfs_setxattr_noperm(&nop_mnt_idmap, dentry, XATTR_NAME_SMACK, +- ctx, ctxlen, 0); ++ return __vfs_setxattr_locked(&nop_mnt_idmap, dentry, XATTR_NAME_SMACK, ++ ctx, ctxlen, 0, NULL); + } + + static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen) +diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c +index 42a7051410501..e115fe1836349 100644 +--- a/sound/core/seq/seq_clientmgr.c ++++ b/sound/core/seq/seq_clientmgr.c +@@ -537,6 +537,9 @@ static struct snd_seq_client *get_event_dest_client(struct snd_seq_event *event, + return NULL; + if (! 
dest->accept_input) + goto __not_avail; ++ if (snd_seq_ev_is_ump(event)) ++ return dest; /* ok - no filter checks */ ++ + if ((dest->filter & SNDRV_SEQ_FILTER_USE_EVENT) && + ! test_bit(event->type, dest->event_filter)) + goto __not_avail; +diff --git a/sound/soc/amd/acp/acp-legacy-mach.c b/sound/soc/amd/acp/acp-legacy-mach.c +index 6d57d17ddfd77..6e820c2edd1d8 100644 +--- a/sound/soc/amd/acp/acp-legacy-mach.c ++++ b/sound/soc/amd/acp/acp-legacy-mach.c +@@ -137,6 +137,8 @@ static const struct platform_device_id board_ids[] = { + }, + { } + }; ++MODULE_DEVICE_TABLE(platform, board_ids); ++ + static struct platform_driver acp_asoc_audio = { + .driver = { + .pm = &snd_soc_pm_ops, +diff --git a/sound/soc/sof/amd/acp.c b/sound/soc/sof/amd/acp.c +index add386f749ae9..bfed848de77c8 100644 +--- a/sound/soc/sof/amd/acp.c ++++ b/sound/soc/sof/amd/acp.c +@@ -380,6 +380,7 @@ static int acp_power_on(struct snd_sof_dev *sdev) + const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata); + unsigned int base = desc->pgfsm_base; + unsigned int val; ++ unsigned int acp_pgfsm_status_mask, acp_pgfsm_cntl_mask; + int ret; + + val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + PGFSM_STATUS_OFFSET); +@@ -387,9 +388,23 @@ static int acp_power_on(struct snd_sof_dev *sdev) + if (val == ACP_POWERED_ON) + return 0; + +- if (val & ACP_PGFSM_STATUS_MASK) ++ switch (desc->rev) { ++ case 3: ++ case 5: ++ acp_pgfsm_status_mask = ACP3X_PGFSM_STATUS_MASK; ++ acp_pgfsm_cntl_mask = ACP3X_PGFSM_CNTL_POWER_ON_MASK; ++ break; ++ case 6: ++ acp_pgfsm_status_mask = ACP6X_PGFSM_STATUS_MASK; ++ acp_pgfsm_cntl_mask = ACP6X_PGFSM_CNTL_POWER_ON_MASK; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ if (val & acp_pgfsm_status_mask) + snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + PGFSM_CONTROL_OFFSET, +- ACP_PGFSM_CNTL_POWER_ON_MASK); ++ acp_pgfsm_cntl_mask); + + ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, base + PGFSM_STATUS_OFFSET, val, + !val, ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US); +diff --git a/sound/soc/sof/amd/acp.h b/sound/soc/sof/amd/acp.h +index 4dcceb7647694..133abed74f015 100644 +--- a/sound/soc/sof/amd/acp.h ++++ b/sound/soc/sof/amd/acp.h +@@ -25,8 +25,11 @@ + #define ACP_REG_POLL_TIMEOUT_US 2000 + #define ACP_DMA_COMPLETE_TIMEOUT_US 5000 + +-#define ACP_PGFSM_CNTL_POWER_ON_MASK 0x01 +-#define ACP_PGFSM_STATUS_MASK 0x03 ++#define ACP3X_PGFSM_CNTL_POWER_ON_MASK 0x01 ++#define ACP3X_PGFSM_STATUS_MASK 0x03 ++#define ACP6X_PGFSM_CNTL_POWER_ON_MASK 0x07 ++#define ACP6X_PGFSM_STATUS_MASK 0x0F ++ + #define ACP_POWERED_ON 0x00 + #define ACP_ASSERT_RESET 0x01 + #define ACP_RELEASE_RESET 0x00 +diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c +index 33d08600be13d..890a81f4ff618 100644 +--- a/tools/testing/selftests/iommu/iommufd.c ++++ b/tools/testing/selftests/iommu/iommufd.c +@@ -531,7 +531,7 @@ TEST_F(iommufd_ioas, copy_area) + { + struct iommu_ioas_copy copy_cmd = { + .size = sizeof(copy_cmd), +- .flags = IOMMU_IOAS_MAP_FIXED_IOVA, ++ .flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE, + .dst_ioas_id = self->ioas_id, + .src_ioas_id = self->ioas_id, + .length = PAGE_SIZE, +@@ -1024,7 +1024,7 @@ TEST_F(iommufd_ioas, copy_sweep) + { + struct iommu_ioas_copy copy_cmd = { + .size = sizeof(copy_cmd), +- .flags = IOMMU_IOAS_MAP_FIXED_IOVA, ++ .flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE, + .src_ioas_id = self->ioas_id, + .dst_iova = MOCK_APERTURE_START, + .length = MOCK_PAGE_SIZE, +@@ -1314,7 +1314,7 @@ TEST_F(iommufd_mock_domain, user_copy) 
+ }; + struct iommu_ioas_copy copy_cmd = { + .size = sizeof(copy_cmd), +- .flags = IOMMU_IOAS_MAP_FIXED_IOVA, ++ .flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE, + .dst_ioas_id = self->ioas_id, + .dst_iova = MOCK_APERTURE_START, + .length = BUFFER_SIZE, +diff --git a/tools/testing/selftests/net/forwarding/local_termination.sh b/tools/testing/selftests/net/forwarding/local_termination.sh +index c5b0cbc85b3e0..9b5a63519b949 100755 +--- a/tools/testing/selftests/net/forwarding/local_termination.sh ++++ b/tools/testing/selftests/net/forwarding/local_termination.sh +@@ -278,6 +278,10 @@ bridge() + cleanup() + { + pre_cleanup ++ ++ ip link set $h2 down ++ ip link set $h1 down ++ + vrf_cleanup + } + +diff --git a/tools/testing/selftests/net/forwarding/no_forwarding.sh b/tools/testing/selftests/net/forwarding/no_forwarding.sh +index af3b398d13f01..9e677aa64a06a 100755 +--- a/tools/testing/selftests/net/forwarding/no_forwarding.sh ++++ b/tools/testing/selftests/net/forwarding/no_forwarding.sh +@@ -233,6 +233,9 @@ cleanup() + { + pre_cleanup + ++ ip link set dev $swp2 down ++ ip link set dev $swp1 down ++ + h2_destroy + h1_destroy + +diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh +index e8726ec658ae4..2be13dd19ddd2 100755 +--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh ++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh +@@ -1256,26 +1256,26 @@ chk_csum_nr() + + print_check "sum" + count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtDataCsumErr") +- if [ "$count" != "$csum_ns1" ]; then ++ if [ -n "$count" ] && [ "$count" != "$csum_ns1" ]; then + extra_msg+=" ns1=$count" + fi + if [ -z "$count" ]; then + print_skip + elif { [ "$count" != $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 0 ]; } || +- { [ "$count" -lt $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 1 ]; }; then ++ { [ "$count" -lt $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 1 ]; }; then + fail_test "got $count data checksum error[s] expected $csum_ns1" + else + print_ok + fi + print_check "csum" + count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtDataCsumErr") +- if [ "$count" != "$csum_ns2" ]; then ++ if [ -n "$count" ] && [ "$count" != "$csum_ns2" ]; then + extra_msg+=" ns2=$count" + fi + if [ -z "$count" ]; then + print_skip + elif { [ "$count" != $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 0 ]; } || +- { [ "$count" -lt $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 1 ]; }; then ++ { [ "$count" -lt $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 1 ]; }; then + fail_test "got $count data checksum error[s] expected $csum_ns2" + else + print_ok +@@ -1313,13 +1313,13 @@ chk_fail_nr() + + print_check "ftx" + count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMPFailTx") +- if [ "$count" != "$fail_tx" ]; then ++ if [ -n "$count" ] && [ "$count" != "$fail_tx" ]; then + extra_msg+=",tx=$count" + fi + if [ -z "$count" ]; then + print_skip + elif { [ "$count" != "$fail_tx" ] && [ $allow_tx_lost -eq 0 ]; } || +- { [ "$count" -gt "$fail_tx" ] && [ $allow_tx_lost -eq 1 ]; }; then ++ { [ "$count" -gt "$fail_tx" ] && [ $allow_tx_lost -eq 1 ]; }; then + fail_test "got $count MP_FAIL[s] TX expected $fail_tx" + else + print_ok +@@ -1327,13 +1327,13 @@ chk_fail_nr() + + print_check "failrx" + count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtMPFailRx") +- if [ "$count" != "$fail_rx" ]; then ++ if [ -n "$count" ] && [ "$count" != "$fail_rx" ]; then + extra_msg+=",rx=$count" + fi + if [ -z "$count" ]; then + print_skip + elif { [ "$count" != "$fail_rx" ] && [ $allow_rx_lost -eq 0 ]; } || +- { 
[ "$count" -gt "$fail_rx" ] && [ $allow_rx_lost -eq 1 ]; }; then ++ { [ "$count" -gt "$fail_rx" ] && [ $allow_rx_lost -eq 1 ]; }; then + fail_test "got $count MP_FAIL[s] RX expected $fail_rx" + else + print_ok +@@ -3578,27 +3578,28 @@ endpoint_tests() + + if reset_with_tcp_filter "delete and re-add" ns2 10.0.3.2 REJECT OUTPUT && + mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then +- pm_nl_set_limits $ns1 0 2 +- pm_nl_set_limits $ns2 0 2 ++ pm_nl_set_limits $ns1 0 3 ++ pm_nl_set_limits $ns2 0 3 ++ pm_nl_add_endpoint $ns2 10.0.1.2 id 1 dev ns2eth1 flags subflow + pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow +- test_linkfail=4 speed=20 \ ++ test_linkfail=4 speed=5 \ + run_tests $ns1 $ns2 10.0.1.1 & + local tests_pid=$! + + wait_mpj $ns2 + pm_nl_check_endpoint "creation" \ + $ns2 10.0.2.2 id 2 flags subflow dev ns2eth2 +- chk_subflow_nr "before delete" 2 ++ chk_subflow_nr "before delete id 2" 2 + chk_mptcp_info subflows 1 subflows 1 + + pm_nl_del_endpoint $ns2 2 10.0.2.2 + sleep 0.5 +- chk_subflow_nr "after delete" 1 ++ chk_subflow_nr "after delete id 2" 1 + chk_mptcp_info subflows 0 subflows 0 + + pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow + wait_mpj $ns2 +- chk_subflow_nr "after re-add" 2 ++ chk_subflow_nr "after re-add id 2" 2 + chk_mptcp_info subflows 1 subflows 1 + + pm_nl_add_endpoint $ns2 10.0.3.2 id 3 flags subflow +@@ -3613,10 +3614,23 @@ endpoint_tests() + chk_subflow_nr "after no reject" 3 + chk_mptcp_info subflows 2 subflows 2 + ++ local i ++ for i in $(seq 3); do ++ pm_nl_del_endpoint $ns2 1 10.0.1.2 ++ sleep 0.5 ++ chk_subflow_nr "after delete id 0 ($i)" 2 ++ chk_mptcp_info subflows 2 subflows 2 # only decr for additional sf ++ ++ pm_nl_add_endpoint $ns2 10.0.1.2 id 1 dev ns2eth1 flags subflow ++ wait_mpj $ns2 ++ chk_subflow_nr "after re-add id 0 ($i)" 3 ++ chk_mptcp_info subflows 3 subflows 3 ++ done ++ + mptcp_lib_kill_wait $tests_pid + +- chk_join_nr 3 3 3 +- chk_rm_nr 1 1 ++ chk_join_nr 6 6 6 ++ chk_rm_nr 4 4 + fi + } + diff --git a/patch/kernel/archive/odroidxu4-6.6/patch-6.6.49-50.patch b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.49-50.patch new file mode 100644 index 000000000000..16d4e5c59539 --- /dev/null +++ b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.49-50.patch @@ -0,0 +1,5960 @@ +diff --git a/Documentation/locking/hwspinlock.rst b/Documentation/locking/hwspinlock.rst +index 6f03713b70039..2ffaa3cbd63f1 100644 +--- a/Documentation/locking/hwspinlock.rst ++++ b/Documentation/locking/hwspinlock.rst +@@ -85,6 +85,17 @@ is already free). + + Should be called from a process context (might sleep). + ++:: ++ ++ int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id); ++ ++After verifying the owner of the hwspinlock, release a previously acquired ++hwspinlock; returns 0 on success, or an appropriate error code on failure ++(e.g. -EOPNOTSUPP if the bust operation is not defined for the specific ++hwspinlock). ++ ++Should be called from a process context (might sleep). 
++ + :: + + int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int timeout); +diff --git a/Makefile b/Makefile +index 008fa9137c732..f7efbb59c9865 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 6 +-SUBLEVEL = 49 ++SUBLEVEL = 50 + EXTRAVERSION = + NAME = Hurr durr I'ma ninja sloth + +diff --git a/block/blk-integrity.c b/block/blk-integrity.c +index d4e9b4556d14b..5276c556a9df9 100644 +--- a/block/blk-integrity.c ++++ b/block/blk-integrity.c +@@ -396,8 +396,6 @@ void blk_integrity_unregister(struct gendisk *disk) + if (!bi->profile) + return; + +- /* ensure all bios are off the integrity workqueue */ +- blk_flush_integrity(); + blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue); + memset(bi, 0, sizeof(*bi)); + } +diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c +index 37ab23a9d0345..7f14c5ed1e229 100644 +--- a/drivers/base/regmap/regmap-spi.c ++++ b/drivers/base/regmap/regmap-spi.c +@@ -122,8 +122,7 @@ static const struct regmap_bus *regmap_get_spi_bus(struct spi_device *spi, + return ERR_PTR(-ENOMEM); + + max_msg_size = spi_max_message_size(spi); +- reg_reserve_size = config->reg_bits / BITS_PER_BYTE +- + config->pad_bits / BITS_PER_BYTE; ++ reg_reserve_size = (config->reg_bits + config->pad_bits) / BITS_PER_BYTE; + if (max_size + reg_reserve_size > max_msg_size) + max_size -= reg_reserve_size; + +diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c +index 028df8a5f537a..079940c69ee0b 100644 +--- a/drivers/cpufreq/scmi-cpufreq.c ++++ b/drivers/cpufreq/scmi-cpufreq.c +@@ -62,9 +62,9 @@ static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy, + unsigned int target_freq) + { + struct scmi_data *priv = policy->driver_data; ++ unsigned long freq = target_freq; + +- if (!perf_ops->freq_set(ph, priv->domain_id, +- target_freq * 1000, true)) ++ if (!perf_ops->freq_set(ph, priv->domain_id, freq * 1000, true)) + return target_freq; + + return 0; +diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c +index f095f0065428a..2f1b82cf10b1c 100644 +--- a/drivers/crypto/stm32/stm32-cryp.c ++++ b/drivers/crypto/stm32/stm32-cryp.c +@@ -11,6 +11,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -1665,8 +1666,11 @@ static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg) + it_mask &= ~IMSCR_OUT; + stm32_cryp_write(cryp, cryp->caps->imsc, it_mask); + +- if (!cryp->payload_in && !cryp->header_in && !cryp->payload_out) ++ if (!cryp->payload_in && !cryp->header_in && !cryp->payload_out) { ++ local_bh_disable(); + stm32_cryp_finish_req(cryp, 0); ++ local_bh_enable(); ++ } + + return IRQ_HANDLED; + } +diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c +index 4153c2edb0490..711e3756a39a5 100644 +--- a/drivers/dma/altera-msgdma.c ++++ b/drivers/dma/altera-msgdma.c +@@ -233,7 +233,7 @@ static void msgdma_free_descriptor(struct msgdma_device *mdev, + struct msgdma_sw_desc *child, *next; + + mdev->desc_free_cnt++; +- list_add_tail(&desc->node, &mdev->free_list); ++ list_move_tail(&desc->node, &mdev->free_list); + list_for_each_entry_safe(child, next, &desc->tx_list, node) { + mdev->desc_free_cnt++; + list_move_tail(&child->node, &mdev->free_list); +@@ -583,17 +583,16 @@ static void msgdma_issue_pending(struct dma_chan *chan) + static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev) + { + struct msgdma_sw_desc *desc, *next; ++ unsigned long irqflags; + + 
list_for_each_entry_safe(desc, next, &mdev->done_list, node) { + struct dmaengine_desc_callback cb; + +- list_del(&desc->node); +- + dmaengine_desc_get_callback(&desc->async_tx, &cb); + if (dmaengine_desc_callback_valid(&cb)) { +- spin_unlock(&mdev->lock); ++ spin_unlock_irqrestore(&mdev->lock, irqflags); + dmaengine_desc_callback_invoke(&cb, NULL); +- spin_lock(&mdev->lock); ++ spin_lock_irqsave(&mdev->lock, irqflags); + } + + /* Run any dependencies, then free the descriptor */ +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c +index a4d65973bf7cf..80771b1480fff 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c +@@ -100,6 +100,7 @@ struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock) + amdgpu_afmt_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000); + amdgpu_afmt_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100); + amdgpu_afmt_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000); ++ res.clock = clock; + + return res; + } +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +index 9d72bb0a0eaec..a1f35510d5395 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +@@ -407,6 +407,10 @@ static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain, + "Called with userptr BO")) + return -EINVAL; + ++ /* bo has been pinned, not need validate it */ ++ if (bo->tbo.pin_count) ++ return 0; ++ + amdgpu_bo_placement_from_domain(bo, domain); + + ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); +@@ -2631,7 +2635,7 @@ static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_i + + /* keep mem without hmm range at userptr_inval_list */ + if (!mem->range) +- continue; ++ continue; + + /* Only check mem with hmm range associated */ + valid = amdgpu_ttm_tt_get_user_pages_done( +@@ -2848,9 +2852,6 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) + if (!attachment->is_mapped) + continue; + +- if (attachment->bo_va->base.bo->tbo.pin_count) +- continue; +- + kfd_mem_dmaunmap_attachment(mem, attachment); + ret = update_gpuvm_pte(mem, attachment, &sync_obj); + if (ret) { +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +index dce9e7d5e4ec6..a14a54a734c12 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +@@ -1476,6 +1476,8 @@ int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev, + (u32)le32_to_cpu(*((u32 *)reg_data + j)); + j++; + } else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) { ++ if (i == 0) ++ continue; + reg_table->mc_reg_table_entry[num_ranges].mc_data[i] = + reg_table->mc_reg_table_entry[num_ranges].mc_data[i - 1]; + } +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +index b8280be6225d9..c3d89088123db 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +@@ -213,6 +213,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, + struct amdgpu_firmware_info *ucode; + + id = fw_type_convert(cgs_device, type); ++ if (id >= AMDGPU_UCODE_ID_MAXIMUM) ++ return -EINVAL; ++ + ucode = &adev->firmware.ucode[id]; + if (ucode->fw == NULL) + return -EINVAL; +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +index eb663eb811563..9c99d69b4b083 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +@@ -4480,7 +4480,8 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev) + shadow = vmbo->shadow; + + /* No need to recover an evicted BO */ +- if (shadow->tbo.resource->mem_type != TTM_PL_TT || ++ if (!shadow->tbo.resource || ++ shadow->tbo.resource->mem_type != TTM_PL_TT || + shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET || + shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM) + continue; +@@ -5235,7 +5236,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, + * to put adev in the 1st position. + */ + INIT_LIST_HEAD(&device_list); +- if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) { ++ if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) { + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { + list_add_tail(&tmp_adev->reset_list, &device_list); + if (gpu_reset_for_dev_remove && adev->shutdown) +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +index cf2faeae1d0db..b04d789bfd100 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +@@ -1550,7 +1550,7 @@ static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev) + break; + case 2: + mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc); +- adev->gmc.mall_size = mall_size_per_umc * adev->gmc.num_umc; ++ adev->gmc.mall_size = (uint64_t)mall_size_per_umc * adev->gmc.num_umc; + break; + default: + dev_err(adev->dev, +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c +index e71768661ca8d..09a34c7258e22 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c +@@ -179,7 +179,7 @@ static int __amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr, + * Returns the number of bytes read/written; -errno on error. 
+ */ + static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr, +- u8 *eeprom_buf, u16 buf_size, bool read) ++ u8 *eeprom_buf, u32 buf_size, bool read) + { + const struct i2c_adapter_quirks *quirks = i2c_adap->quirks; + u16 limit; +@@ -225,7 +225,7 @@ static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr, + + int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap, + u32 eeprom_addr, u8 *eeprom_buf, +- u16 bytes) ++ u32 bytes) + { + return amdgpu_eeprom_xfer(i2c_adap, eeprom_addr, eeprom_buf, bytes, + true); +@@ -233,7 +233,7 @@ int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap, + + int amdgpu_eeprom_write(struct i2c_adapter *i2c_adap, + u32 eeprom_addr, u8 *eeprom_buf, +- u16 bytes) ++ u32 bytes) + { + return amdgpu_eeprom_xfer(i2c_adap, eeprom_addr, eeprom_buf, bytes, + false); +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.h +index 6935adb2be1f1..8083b8253ef43 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.h ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.h +@@ -28,10 +28,10 @@ + + int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap, + u32 eeprom_addr, u8 *eeprom_buf, +- u16 bytes); ++ u32 bytes); + + int amdgpu_eeprom_write(struct i2c_adapter *i2c_adap, + u32 eeprom_addr, u8 *eeprom_buf, +- u16 bytes); ++ u32 bytes); + + #endif +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +index 73b8cca35bab8..eace2c9d0c362 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +@@ -34,6 +34,7 @@ + #include + #endif + #include "amdgpu.h" ++#include "amdgpu_reset.h" + #include + #include + +@@ -400,7 +401,10 @@ void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev) + return; + + mb(); +- amdgpu_device_flush_hdp(adev, NULL); ++ if (down_read_trylock(&adev->reset_domain->sem)) { ++ amdgpu_device_flush_hdp(adev, NULL); ++ up_read(&adev->reset_domain->sem); ++ } + for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) + amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0); + } +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +index 429ef212c1f25..a4f9015345ccb 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +@@ -1336,6 +1336,9 @@ static void psp_xgmi_reflect_topology_info(struct psp_context *psp, + uint8_t dst_num_links = node_info.num_links; + + hive = amdgpu_get_xgmi_hive(psp->adev); ++ if (WARN_ON(!hive)) ++ return; ++ + list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) { + struct psp_xgmi_topology_info *mirror_top_info; + int j; +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +index dbde3b41c0883..f44b303ae287a 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +@@ -352,7 +352,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, + ring->max_dw = max_dw; + ring->hw_prio = hw_prio; + +- if (!ring->no_scheduler) { ++ if (!ring->no_scheduler && ring->funcs->type < AMDGPU_HW_IP_NUM) { + hw_ip = ring->funcs->type; + num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds; + adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] = +@@ -469,8 +469,9 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) + { + struct amdgpu_ring *ring = file_inode(f)->i_private; +- int r, i; + uint32_t value, result, early[3]; ++ loff_t i; ++ 
int r; + + if (*pos & 3 || size & 3) + return -EINVAL; +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c +index 8ed0e073656f8..41ebe690eeffa 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c +@@ -135,6 +135,10 @@ static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __u + mutex_unlock(&psp->securedisplay_context.mutex); + break; + case 2: ++ if (size < 3 || phy_id >= TA_SECUREDISPLAY_MAX_PHY) { ++ dev_err(adev->dev, "Invalid input: %s\n", str); ++ return -EINVAL; ++ } + mutex_lock(&psp->securedisplay_context.mutex); + psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, + TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC); +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +index ff4f52e07cc0d..d9dc675b46aed 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +@@ -615,7 +615,7 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev) + vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr; + vf2pf_info->checksum = + amd_sriov_msg_checksum( +- vf2pf_info, vf2pf_info->header.size, 0, 0); ++ vf2pf_info, sizeof(*vf2pf_info), 0, 0); + + return 0; + } +@@ -998,6 +998,9 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v + return 0; + } + ++ if (amdgpu_device_skip_hw_access(adev)) ++ return 0; ++ + reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[xcc_id]; + scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0; + scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1; +@@ -1073,6 +1076,9 @@ void amdgpu_sriov_wreg(struct amdgpu_device *adev, + { + u32 rlcg_flag; + ++ if (amdgpu_device_skip_hw_access(adev)) ++ return; ++ + if (!amdgpu_sriov_runtime(adev) && + amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) { + amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag, xcc_id); +@@ -1090,6 +1096,9 @@ u32 amdgpu_sriov_rreg(struct amdgpu_device *adev, + { + u32 rlcg_flag; + ++ if (amdgpu_device_skip_hw_access(adev)) ++ return 0; ++ + if (!amdgpu_sriov_runtime(adev) && + amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag)) + return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag, xcc_id); +diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c +index 0284c9198a04a..6c6f9d9b5d897 100644 +--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c ++++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c +@@ -500,6 +500,12 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, + + if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) { + mode = __aqua_vanjaram_get_auto_mode(xcp_mgr); ++ if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) { ++ dev_err(adev->dev, ++ "Invalid config, no compatible compute partition mode found, available memory partitions: %d", ++ adev->gmc.num_mem_partitions); ++ return -EINVAL; ++ } + } else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) { + dev_err(adev->dev, + "Invalid compute partition mode requested, requested: %s, available memory partitions: %d", +diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c +index 5dfab80ffff21..cd298556f7a60 100644 +--- a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c ++++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c +@@ -70,6 +70,8 @@ static u32 
df_v1_7_get_hbm_channel_number(struct amdgpu_device *adev) + int fb_channel_number; + + fb_channel_number = adev->df.funcs->get_fb_channel_number(adev); ++ if (fb_channel_number >= ARRAY_SIZE(df_v1_7_channel_number)) ++ fb_channel_number = 0; + + return df_v1_7_channel_number[fb_channel_number]; + } +diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +index 685abf57ffddc..977b956bf930a 100644 +--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c ++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +@@ -384,7 +384,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device + else + WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl); + +- if (!ras->disable_ras_err_cnt_harvest) { ++ if (ras && !ras->disable_ras_err_cnt_harvest && obj) { + /* + * clear error status after ras_controller_intr + * according to hw team and count ue number +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h +index 74c2d7a0d6285..2f54ee08f2696 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h +@@ -42,8 +42,6 @@ + #define CRAT_OEMTABLEID_LENGTH 8 + #define CRAT_RESERVED_LENGTH 6 + +-#define CRAT_OEMID_64BIT_MASK ((1ULL << (CRAT_OEMID_LENGTH * 8)) - 1) +- + /* Compute Unit flags */ + #define COMPUTE_UNIT_CPU (1 << 0) /* Create Virtual CRAT for CPU */ + #define COMPUTE_UNIT_GPU (1 << 1) /* Create Virtual CRAT for GPU */ +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +index 9ec750666382f..94aaf2fc556ca 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +@@ -103,7 +103,8 @@ void debug_event_write_work_handler(struct work_struct *work) + struct kfd_process, + debug_event_workarea); + +- kernel_write(process->dbg_ev_file, &write_data, 1, &pos); ++ if (process->debug_trap_enabled && process->dbg_ev_file) ++ kernel_write(process->dbg_ev_file, &write_data, 1, &pos); + } + + /* update process/device/queue exception status, write to descriptor +@@ -645,6 +646,7 @@ int kfd_dbg_trap_disable(struct kfd_process *target) + else if (target->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_DISABLED) + target->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_ENABLED; + ++ cancel_work_sync(&target->debug_event_workarea); + fput(target->dbg_ev_file); + target->dbg_ev_file = NULL; + +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +index 43eff221eae58..8aca92624a77e 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +@@ -28,6 +28,7 @@ + #include "kfd_priv.h" + #include "kfd_kernel_queue.h" + #include "amdgpu_amdkfd.h" ++#include "amdgpu_reset.h" + + static inline struct process_queue_node *get_queue_by_qid( + struct process_queue_manager *pqm, unsigned int qid) +@@ -87,8 +88,12 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd) + return; + + dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd); +- if (dev->kfd->shared_resources.enable_mes) +- amdgpu_mes_flush_shader_debugger(dev->adev, pdd->proc_ctx_gpu_addr); ++ if (dev->kfd->shared_resources.enable_mes && ++ down_read_trylock(&dev->adev->reset_domain->sem)) { ++ amdgpu_mes_flush_shader_debugger(dev->adev, ++ pdd->proc_ctx_gpu_addr); ++ up_read(&dev->adev->reset_domain->sem); ++ } + pdd->already_dequeued = true; + } + +diff --git 
a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +index 61157fddc15c7..8362a71ab7075 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +@@ -958,8 +958,7 @@ static void kfd_update_system_properties(void) + dev = list_last_entry(&topology_device_list, + struct kfd_topology_device, list); + if (dev) { +- sys_props.platform_id = +- (*((uint64_t *)dev->oem_id)) & CRAT_OEMID_64BIT_MASK; ++ sys_props.platform_id = dev->oem_id64; + sys_props.platform_oem = *((uint64_t *)dev->oem_table_id); + sys_props.platform_rev = dev->oem_revision; + } +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h +index 27386ce9a021d..2d1c9d771bef2 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h +@@ -154,7 +154,10 @@ struct kfd_topology_device { + struct attribute attr_gpuid; + struct attribute attr_name; + struct attribute attr_props; +- uint8_t oem_id[CRAT_OEMID_LENGTH]; ++ union { ++ uint8_t oem_id[CRAT_OEMID_LENGTH]; ++ uint64_t oem_id64; ++ }; + uint8_t oem_table_id[CRAT_OEMTABLEID_LENGTH]; + uint32_t oem_revision; + }; +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +index 94059aef762be..44c1556838240 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +@@ -4357,7 +4357,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) + + /* There is one primary plane per CRTC */ + primary_planes = dm->dc->caps.max_streams; +- ASSERT(primary_planes <= AMDGPU_MAX_PLANES); ++ if (primary_planes > AMDGPU_MAX_PLANES) { ++ DRM_ERROR("DM: Plane nums out of 6 planes\n"); ++ return -EINVAL; ++ } + + /* + * Initialize primary planes, implicit planes for legacy IOCTLS. 
+@@ -8283,15 +8286,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, + bundle->stream_update.vrr_infopacket = + &acrtc_state->stream->vrr_infopacket; + } +- } else if (cursor_update && acrtc_state->active_planes > 0 && +- acrtc_attach->base.state->event) { +- drm_crtc_vblank_get(pcrtc); +- ++ } else if (cursor_update && acrtc_state->active_planes > 0) { + spin_lock_irqsave(&pcrtc->dev->event_lock, flags); +- +- acrtc_attach->event = acrtc_attach->base.state->event; +- acrtc_attach->base.state->event = NULL; +- ++ if (acrtc_attach->base.state->event) { ++ drm_crtc_vblank_get(pcrtc); ++ acrtc_attach->event = acrtc_attach->base.state->event; ++ acrtc_attach->base.state->event = NULL; ++ } + spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); + } + +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +index 9e4cc5eeda767..88606b805330d 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +@@ -49,7 +49,7 @@ + + #define AMDGPU_DM_MAX_NUM_EDP 2 + +-#define AMDGPU_DMUB_NOTIFICATION_MAX 5 ++#define AMDGPU_DMUB_NOTIFICATION_MAX 6 + + #define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID 0x00001A + #define AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE 0x40 +diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +index 6b31904475815..684b005f564c4 100644 +--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c ++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +@@ -667,6 +667,9 @@ static enum bp_result get_ss_info_v3_1( + ss_table_header_include = ((ATOM_ASIC_INTERNAL_SS_INFO_V3 *) bios_get_image(&bp->base, + DATA_TABLES(ASIC_InternalSS_Info), + struct_size(ss_table_header_include, asSpreadSpectrum, 1))); ++ if (!ss_table_header_include) ++ return BP_RESULT_UNSUPPORTED; ++ + table_size = + (le16_to_cpu(ss_table_header_include->sHeader.usStructureSize) + - sizeof(ATOM_COMMON_TABLE_HEADER)) +@@ -1036,6 +1039,8 @@ static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1( + &bp->base, + DATA_TABLES(ASIC_InternalSS_Info), + struct_size(header, asSpreadSpectrum, 1))); ++ if (!header) ++ return result; + + memset(info, 0, sizeof(struct spread_spectrum_info)); + +@@ -1109,6 +1114,8 @@ static enum bp_result get_ss_info_from_ss_info_table( + get_atom_data_table_revision(header, &revision); + + tbl = GET_IMAGE(ATOM_SPREAD_SPECTRUM_INFO, DATA_TABLES(SS_Info)); ++ if (!tbl) ++ return result; + + if (1 != revision.major || 2 > revision.minor) + return result; +@@ -1636,6 +1643,8 @@ static uint32_t get_ss_entry_number_from_ss_info_tbl( + + tbl = GET_IMAGE(ATOM_SPREAD_SPECTRUM_INFO, + DATA_TABLES(SS_Info)); ++ if (!tbl) ++ return number; + + if (1 != revision.major || 2 > revision.minor) + return number; +@@ -1718,6 +1727,8 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1( + &bp->base, + DATA_TABLES(ASIC_InternalSS_Info), + struct_size(header_include, asSpreadSpectrum, 1))); ++ if (!header_include) ++ return 0; + + size = (le16_to_cpu(header_include->sHeader.usStructureSize) + - sizeof(ATOM_COMMON_TABLE_HEADER)) +@@ -1756,6 +1767,9 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1( + header_include = ((ATOM_ASIC_INTERNAL_SS_INFO_V3 *) bios_get_image(&bp->base, + DATA_TABLES(ASIC_InternalSS_Info), + struct_size(header_include, asSpreadSpectrum, 1))); ++ if (!header_include) ++ return number; ++ + size = 
(le16_to_cpu(header_include->sHeader.usStructureSize) - + sizeof(ATOM_COMMON_TABLE_HEADER)) / + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3); +@@ -2552,8 +2566,8 @@ static enum bp_result construct_integrated_info( + + /* Sort voltage table from low to high*/ + if (result == BP_RESULT_OK) { +- uint32_t i; +- uint32_t j; ++ int32_t i; ++ int32_t j; + + for (i = 1; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) { + for (j = i; j > 0; --j) { +diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +index 93720cf069d7c..384ddb28e6f6d 100644 +--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c ++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +@@ -2935,8 +2935,11 @@ static enum bp_result construct_integrated_info( + struct atom_common_table_header *header; + struct atom_data_revision revision; + +- uint32_t i; +- uint32_t j; ++ int32_t i; ++ int32_t j; ++ ++ if (!info) ++ return result; + + if (info && DATA_TABLES(integratedsysteminfo)) { + header = GET_IMAGE(struct atom_common_table_header, +diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +index 0c6a4ab72b1d2..97cdc24cef9a5 100644 +--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c ++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +@@ -484,7 +484,8 @@ static void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_sm + ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + + /* Modify previous watermark range to cover up to max */ +- ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; ++ if (num_valid_sets > 0) ++ ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + } + num_valid_sets++; + } +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c +index 72db370e2f21f..50e643bfdfbad 100644 +--- a/drivers/gpu/drm/amd/display/dc/core/dc.c ++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c +@@ -1298,6 +1298,7 @@ struct dc *dc_create(const struct dc_init_data *init_params) + return NULL; + + if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) { ++ dc->caps.linear_pitch_alignment = 64; + if (!dc_construct_ctx(dc, init_params)) + goto destruct_dc; + } else { +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +index 84923c5400d32..733e445331ea5 100644 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +@@ -3927,6 +3927,9 @@ void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream, + + enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream) + { ++ if (dc == NULL || stream == NULL) ++ return DC_ERROR_UNEXPECTED; ++ + struct dc_link *link = stream->link; + struct timing_generator *tg = dc->res_pool->timing_generators[0]; + enum dc_status res = DC_OK; +diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c +index 28149e53c2a68..eeb5b8247c965 100644 +--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c ++++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c +@@ -102,7 +102,8 @@ static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, + break; + } + +- fsleep(500); ++ /* must *not* be fsleep - this can be called from high irq levels */ 
++ udelay(500); + } + + /* assert if max retry hit */ +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c +index 994fb732a7cb7..a0d437f0ce2ba 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c +@@ -690,6 +690,9 @@ static void wbscl_set_scaler_filter( + int pair; + uint16_t odd_coef, even_coef; + ++ if (!filter) ++ return; ++ + for (phase = 0; phase < (NUM_PHASES / 2 + 1); phase++) { + for (pair = 0; pair < tap_pairs; pair++) { + even_coef = filter[phase * taps + 2 * pair]; +diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c +index 50b0434354f8f..c08169de3660c 100644 +--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c ++++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c +@@ -1453,10 +1453,9 @@ void dcn_bw_update_from_pplib_fclks( + ASSERT(fclks->num_levels); + + vmin0p65_idx = 0; +- vmid0p72_idx = fclks->num_levels - +- (fclks->num_levels > 2 ? 3 : (fclks->num_levels > 1 ? 2 : 1)); +- vnom0p8_idx = fclks->num_levels - (fclks->num_levels > 1 ? 2 : 1); +- vmax0p9_idx = fclks->num_levels - 1; ++ vmid0p72_idx = fclks->num_levels > 2 ? fclks->num_levels - 3 : 0; ++ vnom0p8_idx = fclks->num_levels > 1 ? fclks->num_levels - 2 : 0; ++ vmax0p9_idx = fclks->num_levels > 0 ? fclks->num_levels - 1 : 0; + + dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = + 32 * (fclks->data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0; +diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c +index e2bcd205aa936..8da97a96b1ceb 100644 +--- a/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c ++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c +@@ -304,6 +304,16 @@ void dcn302_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p + dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16; + } + ++ /* bw_params->clk_table.entries[MAX_NUM_DPM_LVL]. ++ * MAX_NUM_DPM_LVL is 8. ++ * dcn3_02_soc.clock_limits[DC__VOLTAGE_STATES]. ++ * DC__VOLTAGE_STATES is 40. ++ */ ++ if (num_states > MAX_NUM_DPM_LVL) { ++ ASSERT(0); ++ return; ++ } ++ + dcn3_02_soc.num_states = num_states; + for (i = 0; i < dcn3_02_soc.num_states; i++) { + dcn3_02_soc.clock_limits[i].state = i; +diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c +index 3eb3a021ab7d7..c283780ad0621 100644 +--- a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c ++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c +@@ -299,6 +299,16 @@ void dcn303_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p + dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16; + } + ++ /* bw_params->clk_table.entries[MAX_NUM_DPM_LVL]. ++ * MAX_NUM_DPM_LVL is 8. ++ * dcn3_02_soc.clock_limits[DC__VOLTAGE_STATES]. ++ * DC__VOLTAGE_STATES is 40. 
++ */ ++ if (num_states > MAX_NUM_DPM_LVL) { ++ ASSERT(0); ++ return; ++ } ++ + dcn3_03_soc.num_states = num_states; + for (i = 0; i < dcn3_03_soc.num_states; i++) { + dcn3_03_soc.clock_limits[i].state = i; +diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +index cf3b400c8619b..3d82cbef12740 100644 +--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c ++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +@@ -2885,6 +2885,16 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa + dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16; + } + ++ /* bw_params->clk_table.entries[MAX_NUM_DPM_LVL]. ++ * MAX_NUM_DPM_LVL is 8. ++ * dcn3_02_soc.clock_limits[DC__VOLTAGE_STATES]. ++ * DC__VOLTAGE_STATES is 40. ++ */ ++ if (num_states > MAX_NUM_DPM_LVL) { ++ ASSERT(0); ++ return; ++ } ++ + dcn3_2_soc.num_states = num_states; + for (i = 0; i < dcn3_2_soc.num_states; i++) { + dcn3_2_soc.clock_limits[i].state = i; +diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c +index b26fcf86014c7..ae2196c36f218 100644 +--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c ++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c +@@ -789,6 +789,16 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p + dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16; + } + ++ /* bw_params->clk_table.entries[MAX_NUM_DPM_LVL]. ++ * MAX_NUM_DPM_LVL is 8. ++ * dcn3_02_soc.clock_limits[DC__VOLTAGE_STATES]. ++ * DC__VOLTAGE_STATES is 40. ++ */ ++ if (num_states > MAX_NUM_DPM_LVL) { ++ ASSERT(0); ++ return; ++ } ++ + dcn3_21_soc.num_states = num_states; + for (i = 0; i < dcn3_21_soc.num_states; i++) { + dcn3_21_soc.clock_limits[i].state = i; +diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +index 9a3ded3111952..85453bbb4f9b1 100644 +--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c ++++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +@@ -1099,8 +1099,13 @@ void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib) + + // Total Available Pipes Support Check + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { +- total_pipes += mode_lib->vba.DPPPerPlane[k]; + pipe_idx = get_pipe_idx(mode_lib, k); ++ if (pipe_idx == -1) { ++ ASSERT(0); ++ continue; // skip inactive planes ++ } ++ total_pipes += mode_lib->vba.DPPPerPlane[k]; ++ + if (mode_lib->vba.cache_pipes[pipe_idx].clks_cfg.dppclk_mhz > 0.0) + mode_lib->vba.DPPCLK[k] = mode_lib->vba.cache_pipes[pipe_idx].clks_cfg.dppclk_mhz; + else +diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c +index 3ede6e02c3a78..f2037d78f71ab 100644 +--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c ++++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c +@@ -56,7 +56,7 @@ struct gpio_service *dal_gpio_service_create( + struct dc_context *ctx) + { + struct gpio_service *service; +- uint32_t index_of_id; ++ int32_t index_of_id; + + service = kzalloc(sizeof(struct gpio_service), GFP_KERNEL); + +@@ -112,7 +112,7 @@ struct gpio_service *dal_gpio_service_create( + return service; + + failure_2: +- while (index_of_id) { ++ while (index_of_id > 0) { + --index_of_id; + kfree(service->busyness[index_of_id]); + } +@@ -239,6 +239,9 @@ static bool 
is_pin_busy( + enum gpio_id id, + uint32_t en) + { ++ if (id == GPIO_ID_UNKNOWN) ++ return false; ++ + return service->busyness[id][en]; + } + +@@ -247,6 +250,9 @@ static void set_pin_busy( + enum gpio_id id, + uint32_t en) + { ++ if (id == GPIO_ID_UNKNOWN) ++ return; ++ + service->busyness[id][en] = true; + } + +@@ -255,6 +261,9 @@ static void set_pin_free( + enum gpio_id id, + uint32_t en) + { ++ if (id == GPIO_ID_UNKNOWN) ++ return; ++ + service->busyness[id][en] = false; + } + +@@ -263,7 +272,7 @@ enum gpio_result dal_gpio_service_lock( + enum gpio_id id, + uint32_t en) + { +- if (!service->busyness[id]) { ++ if (id != GPIO_ID_UNKNOWN && !service->busyness[id]) { + ASSERT_CRITICAL(false); + return GPIO_RESULT_OPEN_FAILED; + } +@@ -277,7 +286,7 @@ enum gpio_result dal_gpio_service_unlock( + enum gpio_id id, + uint32_t en) + { +- if (!service->busyness[id]) { ++ if (id != GPIO_ID_UNKNOWN && !service->busyness[id]) { + ASSERT_CRITICAL(false); + return GPIO_RESULT_OPEN_FAILED; + } +diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c +index 25ffc052d53be..df2cb5279ce51 100644 +--- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c ++++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c +@@ -130,13 +130,21 @@ static bool hdmi_14_process_transaction( + const uint8_t hdcp_i2c_addr_link_primary = 0x3a; /* 0x74 >> 1*/ + const uint8_t hdcp_i2c_addr_link_secondary = 0x3b; /* 0x76 >> 1*/ + struct i2c_command i2c_command; +- uint8_t offset = hdcp_i2c_offsets[message_info->msg_id]; ++ uint8_t offset; + struct i2c_payload i2c_payloads[] = { +- { true, 0, 1, &offset }, ++ { true, 0, 1, 0 }, + /* actual hdcp payload, will be filled later, zeroed for now*/ + { 0 } + }; + ++ if (message_info->msg_id == HDCP_MESSAGE_ID_INVALID) { ++ DC_LOG_ERROR("%s: Invalid message_info msg_id - %d\n", __func__, message_info->msg_id); ++ return false; ++ } ++ ++ offset = hdcp_i2c_offsets[message_info->msg_id]; ++ i2c_payloads[0].data = &offset; ++ + switch (message_info->link) { + case HDCP_LINK_SECONDARY: + i2c_payloads[0].address = hdcp_i2c_addr_link_secondary; +@@ -310,6 +318,11 @@ static bool dp_11_process_transaction( + struct dc_link *link, + struct hdcp_protection_message *message_info) + { ++ if (message_info->msg_id == HDCP_MESSAGE_ID_INVALID) { ++ DC_LOG_ERROR("%s: Invalid message_info msg_id - %d\n", __func__, message_info->msg_id); ++ return false; ++ } ++ + return dpcd_access_helper( + link, + message_info->length, +diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +index 9a0beaf601f87..3d589072fe307 100644 +--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c ++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +@@ -528,7 +528,7 @@ static bool decide_fallback_link_setting_max_bw_policy( + struct dc_link_settings *cur, + enum link_training_result training_result) + { +- uint8_t cur_idx = 0, next_idx; ++ uint32_t cur_idx = 0, next_idx; + bool found = false; + + if (training_result == LINK_TRAINING_ABORT) +@@ -908,21 +908,17 @@ bool link_decide_link_settings(struct dc_stream_state *stream, + + memset(link_setting, 0, sizeof(*link_setting)); + +- /* if preferred is specified through AMDDP, use it, if it's enough +- * to drive the mode +- */ +- if (link->preferred_link_setting.lane_count != +- LANE_COUNT_UNKNOWN && +- link->preferred_link_setting.link_rate != +- LINK_RATE_UNKNOWN) { ++ if (dc_is_dp_signal(stream->signal) && 
++ link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN && ++ link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) { ++ /* if preferred is specified through AMDDP, use it, if it's enough ++ * to drive the mode ++ */ + *link_setting = link->preferred_link_setting; +- return true; +- } +- +- /* MST doesn't perform link training for now +- * TODO: add MST specific link training routine +- */ +- if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { ++ } else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { ++ /* MST doesn't perform link training for now ++ * TODO: add MST specific link training routine ++ */ + decide_mst_link_settings(link, link_setting); + } else if (link->connector_signal == SIGNAL_TYPE_EDP) { + /* enable edp link optimization for DSC eDP case */ +diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c +index 16a62e0187122..9d1adfc09fb2a 100644 +--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c ++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c +@@ -914,10 +914,10 @@ static enum dc_status configure_lttpr_mode_non_transparent( + /* Driver does not need to train the first hop. Skip DPCD read and clear + * AUX_RD_INTERVAL for DPTX-to-DPIA hop. + */ +- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ++ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && repeater_cnt > 0 && repeater_cnt < MAX_REPEATER_CNT) + link->dpcd_caps.lttpr_caps.aux_rd_interval[--repeater_cnt] = 0; + +- for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) { ++ for (repeater_id = repeater_cnt; repeater_id > 0 && repeater_id < MAX_REPEATER_CNT; repeater_id--) { + aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1)); + core_link_read_dpcd( +diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c +index 8e9caae7c9559..1b2df97226a3f 100644 +--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c ++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c +@@ -156,11 +156,16 @@ static enum mod_hdcp_status read(struct mod_hdcp *hdcp, + uint32_t cur_size = 0; + uint32_t data_offset = 0; + +- if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID) { ++ if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID || ++ msg_id >= MOD_HDCP_MESSAGE_ID_MAX) + return MOD_HDCP_STATUS_DDC_FAILURE; +- } + + if (is_dp_hdcp(hdcp)) { ++ int num_dpcd_addrs = sizeof(hdcp_dpcd_addrs) / ++ sizeof(hdcp_dpcd_addrs[0]); ++ if (msg_id >= num_dpcd_addrs) ++ return MOD_HDCP_STATUS_DDC_FAILURE; ++ + while (buf_len > 0) { + cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE); + success = hdcp->config.ddc.funcs.read_dpcd(hdcp->config.ddc.handle, +@@ -175,6 +180,11 @@ static enum mod_hdcp_status read(struct mod_hdcp *hdcp, + data_offset += cur_size; + } + } else { ++ int num_i2c_offsets = sizeof(hdcp_i2c_offsets) / ++ sizeof(hdcp_i2c_offsets[0]); ++ if (msg_id >= num_i2c_offsets) ++ return MOD_HDCP_STATUS_DDC_FAILURE; ++ + success = hdcp->config.ddc.funcs.read_i2c( + hdcp->config.ddc.handle, + HDCP_I2C_ADDR, +@@ -219,11 +229,16 @@ static enum mod_hdcp_status write(struct mod_hdcp *hdcp, + uint32_t cur_size = 0; + uint32_t data_offset = 0; + +- if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID) { ++ if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID || ++ msg_id >= MOD_HDCP_MESSAGE_ID_MAX) + return MOD_HDCP_STATUS_DDC_FAILURE; +- } + + if (is_dp_hdcp(hdcp)) { ++ int num_dpcd_addrs = 
sizeof(hdcp_dpcd_addrs) / ++ sizeof(hdcp_dpcd_addrs[0]); ++ if (msg_id >= num_dpcd_addrs) ++ return MOD_HDCP_STATUS_DDC_FAILURE; ++ + while (buf_len > 0) { + cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE); + success = hdcp->config.ddc.funcs.write_dpcd( +@@ -239,6 +254,11 @@ static enum mod_hdcp_status write(struct mod_hdcp *hdcp, + data_offset += cur_size; + } + } else { ++ int num_i2c_offsets = sizeof(hdcp_i2c_offsets) / ++ sizeof(hdcp_i2c_offsets[0]); ++ if (msg_id >= num_i2c_offsets) ++ return MOD_HDCP_STATUS_DDC_FAILURE; ++ + hdcp->buf[0] = hdcp_i2c_offsets[msg_id]; + memmove(&hdcp->buf[1], buf, buf_len); + success = hdcp->config.ddc.funcs.write_i2c( +diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +index 7bf46e4974f88..86f95a291d65f 100644 +--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c ++++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +@@ -99,7 +99,7 @@ static void pp_swctf_delayed_work_handler(struct work_struct *work) + struct amdgpu_device *adev = hwmgr->adev; + struct amdgpu_dpm_thermal *range = + &adev->pm.dpm.thermal; +- uint32_t gpu_temperature, size; ++ uint32_t gpu_temperature, size = sizeof(gpu_temperature); + int ret; + + /* +diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c +index f4bd8e9357e22..18f00038d8441 100644 +--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c ++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c +@@ -30,9 +30,8 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr) + { + int result; + unsigned int i; +- unsigned int table_entries; + struct pp_power_state *state; +- int size; ++ int size, table_entries; + + if (hwmgr->hwmgr_func->get_num_of_pp_table_entries == NULL) + return 0; +@@ -40,15 +39,19 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr) + if (hwmgr->hwmgr_func->get_power_state_size == NULL) + return 0; + +- hwmgr->num_ps = table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr); ++ table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr); + +- hwmgr->ps_size = size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) + ++ size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) + + sizeof(struct pp_power_state); + +- if (table_entries == 0 || size == 0) { ++ if (table_entries <= 0 || size == 0) { + pr_warn("Please check whether power state management is supported on this asic\n"); ++ hwmgr->num_ps = 0; ++ hwmgr->ps_size = 0; + return 0; + } ++ hwmgr->num_ps = table_entries; ++ hwmgr->ps_size = size; + + hwmgr->ps = kcalloc(table_entries, size, GFP_KERNEL); + if (hwmgr->ps == NULL) +diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c +index f503e61faa600..cc3b62f733941 100644 +--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c ++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c +@@ -73,8 +73,9 @@ static int atomctrl_retrieve_ac_timing( + j++; + } else if ((table->mc_reg_address[i].uc_pre_reg_data & + LOW_NIBBLE_MASK) == DATA_EQU_PREV) { +- table->mc_reg_table_entry[num_ranges].mc_data[i] = +- table->mc_reg_table_entry[num_ranges].mc_data[i-1]; ++ if (i) ++ table->mc_reg_table_entry[num_ranges].mc_data[i] = ++ table->mc_reg_table_entry[num_ranges].mc_data[i-1]; + } + } + num_ranges++; +diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c +index 02ba68d7c6546..f62381b189ade 100644 +--- 
a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c ++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c +@@ -1036,7 +1036,9 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, + + switch (type) { + case PP_SCLK: +- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now); ++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now); ++ if (ret) ++ return ret; + + /* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */ + if (now == data->gfx_max_freq_limit/100) +@@ -1057,7 +1059,9 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, + i == 2 ? "*" : ""); + break; + case PP_MCLK: +- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now); ++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now); ++ if (ret) ++ return ret; + + for (i = 0; i < mclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", +@@ -1550,7 +1554,10 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr, + } + + if (input[0] == 0) { +- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq); ++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq); ++ if (ret) ++ return ret; ++ + if (input[1] < min_freq) { + pr_err("Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n", + input[1], min_freq); +@@ -1558,7 +1565,10 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr, + } + smu10_data->gfx_actual_soft_min_freq = input[1]; + } else if (input[0] == 1) { +- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq); ++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq); ++ if (ret) ++ return ret; ++ + if (input[1] > max_freq) { + pr_err("Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n", + input[1], max_freq); +@@ -1573,10 +1583,15 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr, + pr_err("Input parameter number not correct\n"); + return -EINVAL; + } +- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq); +- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq); +- ++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq); ++ if (ret) ++ return ret; + smu10_data->gfx_actual_soft_min_freq = min_freq; ++ ++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq); ++ if (ret) ++ return ret; ++ + smu10_data->gfx_actual_soft_max_freq = max_freq; + } else if (type == PP_OD_COMMIT_DPM_TABLE) { + if (size != 0) { +diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +index 163864bd51c34..53849fd3615f6 100644 +--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c ++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +@@ -5641,7 +5641,7 @@ static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint + mode = input[size]; + switch (mode) { + case PP_SMC_POWER_PROFILE_CUSTOM: +- if (size < 8 && size != 0) ++ if (size != 8 && size != 0) + return -EINVAL; + /* If only CUSTOM is passed in, use the saved values. 
Check + * that we actually have a CUSTOM profile by ensuring that +diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c +index eb744401e0567..7e11974208732 100644 +--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c ++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c +@@ -584,6 +584,7 @@ static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr) + hwmgr->dyn_state.uvd_clock_voltage_dependency_table; + unsigned long clock = 0; + uint32_t level; ++ int ret; + + if (NULL == table || table->count <= 0) + return -EINVAL; +@@ -591,7 +592,9 @@ static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr) + data->uvd_dpm.soft_min_clk = 0; + data->uvd_dpm.hard_min_clk = 0; + +- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level); ++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level); ++ if (ret) ++ return ret; + + if (level < table->count) + clock = table->entries[level].vclk; +@@ -611,6 +614,7 @@ static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr) + hwmgr->dyn_state.vce_clock_voltage_dependency_table; + unsigned long clock = 0; + uint32_t level; ++ int ret; + + if (NULL == table || table->count <= 0) + return -EINVAL; +@@ -618,7 +622,9 @@ static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr) + data->vce_dpm.soft_min_clk = 0; + data->vce_dpm.hard_min_clk = 0; + +- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel, &level); ++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel, &level); ++ if (ret) ++ return ret; + + if (level < table->count) + clock = table->entries[level].ecclk; +@@ -638,6 +644,7 @@ static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr) + hwmgr->dyn_state.acp_clock_voltage_dependency_table; + unsigned long clock = 0; + uint32_t level; ++ int ret; + + if (NULL == table || table->count <= 0) + return -EINVAL; +@@ -645,7 +652,9 @@ static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr) + data->acp_dpm.soft_min_clk = 0; + data->acp_dpm.hard_min_clk = 0; + +- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel, &level); ++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel, &level); ++ if (ret) ++ return ret; + + if (level < table->count) + clock = table->entries[level].acpclk; +diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c +index d43a530aba0e3..6c87b3d4ab362 100644 +--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c ++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c +@@ -354,13 +354,13 @@ static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr) + return 0; + } + +-static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr) ++static int vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr) + { + struct vega10_hwmgr *data = hwmgr->backend; +- int i; + uint32_t sub_vendor_id, hw_revision; + uint32_t top32, bottom32; + struct amdgpu_device *adev = hwmgr->adev; ++ int ret, i; + + vega10_initialize_power_tune_defaults(hwmgr); + +@@ -485,9 +485,12 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr) + if (data->registry_data.vr0hot_enabled) + data->smu_features[GNLD_VR0HOT].supported = true; + +- smum_send_msg_to_smc(hwmgr, ++ ret = smum_send_msg_to_smc(hwmgr, + PPSMC_MSG_GetSmuVersion, + &hwmgr->smu_version); ++ if (ret) ++ return ret; ++ + /* ACG firmware has major version 5 */ + if ((hwmgr->smu_version & 0xff000000) == 0x5000000) + data->smu_features[GNLD_ACG].supported = true; +@@ -505,10 +508,16 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr 
*hwmgr) + data->smu_features[GNLD_PCC_LIMIT].supported = true; + + /* Get the SN to turn into a Unique ID */ +- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32); +- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32); ++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32); ++ if (ret) ++ return ret; ++ ++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32); ++ if (ret) ++ return ret; + + adev->unique_id = ((uint64_t)bottom32 << 32) | top32; ++ return 0; + } + + #ifdef PPLIB_VEGA10_EVV_SUPPORT +@@ -882,7 +891,9 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) + + vega10_set_features_platform_caps(hwmgr); + +- vega10_init_dpm_defaults(hwmgr); ++ result = vega10_init_dpm_defaults(hwmgr); ++ if (result) ++ return result; + + #ifdef PPLIB_VEGA10_EVV_SUPPORT + /* Get leakage voltage based on leakage ID. */ +@@ -2350,15 +2361,20 @@ static int vega10_acg_enable(struct pp_hwmgr *hwmgr) + { + struct vega10_hwmgr *data = hwmgr->backend; + uint32_t agc_btc_response; ++ int ret; + + if (data->smu_features[GNLD_ACG].supported) { + if (0 == vega10_enable_smc_features(hwmgr, true, + data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap)) + data->smu_features[GNLD_DPM_PREFETCHER].enabled = true; + +- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg, NULL); ++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg, NULL); ++ if (ret) ++ return ret; + +- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &agc_btc_response); ++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &agc_btc_response); ++ if (ret) ++ agc_btc_response = 0; + + if (1 == agc_btc_response) { + if (1 == data->acg_loop_state) +@@ -2571,8 +2587,11 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr) + } + } + +- pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC, ++ result = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC, + VOLTAGE_OBJ_SVID2, &voltage_table); ++ PP_ASSERT_WITH_CODE(!result, ++ "Failed to get voltage table!", ++ return result); + pp_table->MaxVidStep = voltage_table.max_vid_step; + + pp_table->GfxDpmVoltageMode = +@@ -3910,11 +3929,14 @@ static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr, + uint32_t *query) + { + uint32_t value; ++ int ret; + + if (!query) + return -EINVAL; + +- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr, &value); ++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr, &value); ++ if (ret) ++ return ret; + + /* SMC returning actual watts, keep consistent with legacy asics, low 8 bit as 8 fractional bits */ + *query = value << 8; +@@ -4810,14 +4832,16 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, + uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width; + PPTable_t *pptable = &(data->smc_state_table.pp_table); + +- int i, now, size = 0, count = 0; ++ int i, ret, now, size = 0, count = 0; + + switch (type) { + case PP_SCLK: + if (data->registry_data.sclk_dpm_key_disabled) + break; + +- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex, &now); ++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex, &now); ++ if (ret) ++ break; + + if (hwmgr->pp_one_vf && + (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) +@@ -4833,7 +4857,9 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, + if (data->registry_data.mclk_dpm_key_disabled) + break; + +- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now); ++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now); 
++ if (ret) ++ break; + + for (i = 0; i < mclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", +@@ -4844,7 +4870,9 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, + if (data->registry_data.socclk_dpm_key_disabled) + break; + +- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now); ++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now); ++ if (ret) ++ break; + + for (i = 0; i < soc_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", +@@ -4855,8 +4883,10 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, + if (data->registry_data.dcefclk_dpm_key_disabled) + break; + +- smum_send_msg_to_smc_with_parameter(hwmgr, ++ ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now); ++ if (ret) ++ break; + + for (i = 0; i < dcef_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", +diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c +index 460067933de2e..069c0f5205e00 100644 +--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c ++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c +@@ -293,12 +293,12 @@ static int vega12_set_features_platform_caps(struct pp_hwmgr *hwmgr) + return 0; + } + +-static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr) ++static int vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr) + { + struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); + struct amdgpu_device *adev = hwmgr->adev; + uint32_t top32, bottom32; +- int i; ++ int i, ret; + + data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id = + FEATURE_DPM_PREFETCHER_BIT; +@@ -364,10 +364,16 @@ static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr) + } + + /* Get the SN to turn into a Unique ID */ +- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32); +- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32); ++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32); ++ if (ret) ++ return ret; ++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32); ++ if (ret) ++ return ret; + + adev->unique_id = ((uint64_t)bottom32 << 32) | top32; ++ ++ return 0; + } + + static int vega12_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr) +@@ -410,7 +416,11 @@ static int vega12_hwmgr_backend_init(struct pp_hwmgr *hwmgr) + + vega12_set_features_platform_caps(hwmgr); + +- vega12_init_dpm_defaults(hwmgr); ++ result = vega12_init_dpm_defaults(hwmgr); ++ if (result) { ++ pr_err("%s failed\n", __func__); ++ return result; ++ } + + /* Parse pptable data read from VBIOS */ + vega12_set_private_data_based_on_pptable(hwmgr); +diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c +index 3b33af30eb0fb..9fdb9990d1882 100644 +--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c ++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c +@@ -328,12 +328,12 @@ static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr) + return 0; + } + +-static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr) ++static int vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr) + { + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + struct amdgpu_device *adev = hwmgr->adev; + uint32_t top32, bottom32; +- int i; ++ int i, ret; + + data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id = + FEATURE_DPM_PREFETCHER_BIT; +@@ -404,10 
+404,17 @@ static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr) + } + + /* Get the SN to turn into a Unique ID */ +- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32); +- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32); ++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32); ++ if (ret) ++ return ret; ++ ++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32); ++ if (ret) ++ return ret; + + adev->unique_id = ((uint64_t)bottom32 << 32) | top32; ++ ++ return 0; + } + + static int vega20_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr) +@@ -427,6 +434,7 @@ static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr) + { + struct vega20_hwmgr *data; + struct amdgpu_device *adev = hwmgr->adev; ++ int result; + + data = kzalloc(sizeof(struct vega20_hwmgr), GFP_KERNEL); + if (data == NULL) +@@ -452,8 +460,11 @@ static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr) + + vega20_set_features_platform_caps(hwmgr); + +- vega20_init_dpm_defaults(hwmgr); +- ++ result = vega20_init_dpm_defaults(hwmgr); ++ if (result) { ++ pr_err("%s failed\n", __func__); ++ return result; ++ } + /* Parse pptable data read from VBIOS */ + vega20_set_private_data_based_on_pptable(hwmgr); + +@@ -4091,9 +4102,11 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui + if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); +- if (size == 0 && !data->is_custom_profile_set) ++ ++ if (size != 10 && size != 0) + return -EINVAL; +- if (size < 10 && size != 0) ++ ++ if (size == 0 && !data->is_custom_profile_set) + return -EINVAL; + + result = vega20_get_activity_monitor_coeff(hwmgr, +@@ -4155,6 +4168,8 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui + activity_monitor.Fclk_PD_Data_error_coeff = input[8]; + activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9]; + break; ++ default: ++ return -EINVAL; + } + + result = vega20_set_activity_monitor_coeff(hwmgr, +diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c +index a70d738966490..f9c0f117725dd 100644 +--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c ++++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c +@@ -130,13 +130,17 @@ int vega10_get_enabled_smc_features(struct pp_hwmgr *hwmgr, + uint64_t *features_enabled) + { + uint32_t enabled_features; ++ int ret; + + if (features_enabled == NULL) + return -EINVAL; + +- smum_send_msg_to_smc(hwmgr, ++ ret = smum_send_msg_to_smc(hwmgr, + PPSMC_MSG_GetEnabledSmuFeatures, + &enabled_features); ++ if (ret) ++ return ret; ++ + *features_enabled = enabled_features; + + return 0; +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +index c564f6e191f84..b1b23233635a6 100644 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +@@ -1222,19 +1222,22 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu, + value); + } + +-static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type) ++static int navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type) + { + PPTable_t *pptable = smu->smu_table.driver_pptable; + DpmDescriptor_t *dpm_desc = NULL; +- uint32_t clk_index = 0; ++ int clk_index = 0; + + clk_index 
= smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_CLK, + clk_type); ++ if (clk_index < 0) ++ return clk_index; ++ + dpm_desc = &pptable->DpmDescriptor[clk_index]; + + /* 0 - Fine grained DPM, 1 - Discrete DPM */ +- return dpm_desc->SnapToDiscrete == 0; ++ return dpm_desc->SnapToDiscrete == 0 ? 1 : 0; + } + + static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_CAP cap) +@@ -1290,7 +1293,11 @@ static int navi10_emit_clk_levels(struct smu_context *smu, + if (ret) + return ret; + +- if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) { ++ ret = navi10_is_support_fine_grained_dpm(smu, clk_type); ++ if (ret < 0) ++ return ret; ++ ++ if (!ret) { + for (i = 0; i < count; i++) { + ret = smu_v11_0_get_dpm_freq_by_index(smu, + clk_type, i, &value); +@@ -1499,7 +1506,11 @@ static int navi10_print_clk_levels(struct smu_context *smu, + if (ret) + return size; + +- if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) { ++ ret = navi10_is_support_fine_grained_dpm(smu, clk_type); ++ if (ret < 0) ++ return ret; ++ ++ if (!ret) { + for (i = 0; i < count; i++) { + ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &value); + if (ret) +@@ -1668,7 +1679,11 @@ static int navi10_force_clk_levels(struct smu_context *smu, + case SMU_UCLK: + case SMU_FCLK: + /* There is only 2 levels for fine grained DPM */ +- if (navi10_is_support_fine_grained_dpm(smu, clk_type)) { ++ ret = navi10_is_support_fine_grained_dpm(smu, clk_type); ++ if (ret < 0) ++ return ret; ++ ++ if (ret) { + soft_max_level = (soft_max_level >= 1 ? 1 : 0); + soft_min_level = (soft_min_level >= 1 ? 1 : 0); + } +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +index 201cec5998428..f46cda8894831 100644 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +@@ -1009,6 +1009,18 @@ static int vangogh_get_dpm_ultimate_freq(struct smu_context *smu, + } + } + if (min) { ++ ret = vangogh_get_profiling_clk_mask(smu, ++ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK, ++ NULL, ++ NULL, ++ &mclk_mask, ++ &fclk_mask, ++ &soc_mask); ++ if (ret) ++ goto failed; ++ ++ vclk_mask = dclk_mask = 0; ++ + switch (clk_type) { + case SMU_UCLK: + case SMU_MCLK: +@@ -2481,6 +2493,8 @@ static u32 vangogh_set_gfxoff_residency(struct smu_context *smu, bool start) + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_LogGfxOffResidency, + start, &residency); ++ if (ret) ++ return ret; + + if (!start) + adev->gfx.gfx_off_residency = residency; +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +index 5afd03e42bbfc..ded8952d98490 100644 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +@@ -1931,7 +1931,8 @@ static int aldebaran_mode2_reset(struct smu_context *smu) + + index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, + SMU_MSG_GfxDeviceDriverReset); +- ++ if (index < 0 ) ++ return -EINVAL; + mutex_lock(&smu->message_lock); + if (smu_version >= 0x00441400) { + ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, SMU_RESET_MODE_2); +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +index be4b7b64f8785..44c5f8585f1ee 100644 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +@@ -2058,6 +2058,8 @@ 
static int smu_v13_0_6_mode2_reset(struct smu_context *smu) + + index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, + SMU_MSG_GfxDeviceDriverReset); ++ if (index < 0) ++ return index; + + mutex_lock(&smu->message_lock); + +diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c +index d941c3a0e6113..7fd4a5fe03edf 100644 +--- a/drivers/gpu/drm/bridge/tc358767.c ++++ b/drivers/gpu/drm/bridge/tc358767.c +@@ -2034,7 +2034,7 @@ static irqreturn_t tc_irq_handler(int irq, void *arg) + dev_err(tc->dev, "syserr %x\n", stat); + } + +- if (tc->hpd_pin >= 0 && tc->bridge.dev) { ++ if (tc->hpd_pin >= 0 && tc->bridge.dev && tc->aux.drm_dev) { + /* + * H is triggered when the GPIO goes high. + * +diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c +index 117237d3528bd..618b045230336 100644 +--- a/drivers/gpu/drm/drm_fb_helper.c ++++ b/drivers/gpu/drm/drm_fb_helper.c +@@ -631,6 +631,17 @@ static void drm_fb_helper_add_damage_clip(struct drm_fb_helper *helper, u32 x, u + static void drm_fb_helper_damage(struct drm_fb_helper *helper, u32 x, u32 y, + u32 width, u32 height) + { ++ /* ++ * This function may be invoked by panic() to flush the frame ++ * buffer, where all CPUs except the panic CPU are stopped. ++ * During the following schedule_work(), the panic CPU needs ++ * the worker_pool lock, which might be held by a stopped CPU, ++ * causing schedule_work() and panic() to block. Return early on ++ * oops_in_progress to prevent this blocking. ++ */ ++ if (oops_in_progress) ++ return; ++ + drm_fb_helper_add_damage_clip(helper, x, y, width, height); + + schedule_work(&helper->damage_work); +diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c +index 5db52d6c5c35c..039da0d1a613b 100644 +--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c ++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c +@@ -414,6 +414,12 @@ static const struct dmi_system_id orientation_data[] = { + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"), + }, + .driver_data = (void *)&lcd1600x2560_leftside_up, ++ }, { /* OrangePi Neo */ ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "OrangePi"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "NEO-01"), ++ }, ++ .driver_data = (void *)&lcd1200x1920_rightside_up, + }, { /* Samsung GalaxyBook 10.6 */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), +diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c +index 815dfe30492b6..b43ac61201f31 100644 +--- a/drivers/gpu/drm/meson/meson_plane.c ++++ b/drivers/gpu/drm/meson/meson_plane.c +@@ -534,6 +534,7 @@ int meson_plane_create(struct meson_drm *priv) + struct meson_plane *meson_plane; + struct drm_plane *plane; + const uint64_t *format_modifiers = format_modifiers_default; ++ int ret; + + meson_plane = devm_kzalloc(priv->drm->dev, sizeof(*meson_plane), + GFP_KERNEL); +@@ -548,12 +549,16 @@ int meson_plane_create(struct meson_drm *priv) + else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) + format_modifiers = format_modifiers_afbc_g12a; + +- drm_universal_plane_init(priv->drm, plane, 0xFF, +- &meson_plane_funcs, +- supported_drm_formats, +- ARRAY_SIZE(supported_drm_formats), +- format_modifiers, +- DRM_PLANE_TYPE_PRIMARY, "meson_primary_plane"); ++ ret = drm_universal_plane_init(priv->drm, plane, 0xFF, ++ &meson_plane_funcs, ++ supported_drm_formats, ++ ARRAY_SIZE(supported_drm_formats), ++ format_modifiers, ++ DRM_PLANE_TYPE_PRIMARY, "meson_primary_plane"); ++ 
if (ret) { ++ devm_kfree(priv->drm->dev, meson_plane); ++ return ret; ++ } + + drm_plane_helper_add(plane, &meson_plane_helper_funcs); + +diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c +index bae0becfa24be..ae0f454c305d6 100644 +--- a/drivers/hwmon/k10temp.c ++++ b/drivers/hwmon/k10temp.c +@@ -153,8 +153,9 @@ static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval) + + static void read_tempreg_nb_zen(struct pci_dev *pdev, u32 *regval) + { +- amd_smn_read(amd_pci_dev_to_node_id(pdev), +- ZEN_REPORTED_TEMP_CTRL_BASE, regval); ++ if (amd_smn_read(amd_pci_dev_to_node_id(pdev), ++ ZEN_REPORTED_TEMP_CTRL_BASE, regval)) ++ *regval = 0; + } + + static long get_raw_temp(struct k10temp_data *data) +@@ -205,6 +206,7 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel, + long *val) + { + struct k10temp_data *data = dev_get_drvdata(dev); ++ int ret = -EOPNOTSUPP; + u32 regval; + + switch (attr) { +@@ -221,13 +223,17 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel, + *val = 0; + break; + case 2 ... 13: /* Tccd{1-12} */ +- amd_smn_read(amd_pci_dev_to_node_id(data->pdev), +- ZEN_CCD_TEMP(data->ccd_offset, channel - 2), +- ®val); ++ ret = amd_smn_read(amd_pci_dev_to_node_id(data->pdev), ++ ZEN_CCD_TEMP(data->ccd_offset, channel - 2), ++ ®val); ++ ++ if (ret) ++ return ret; ++ + *val = (regval & ZEN_CCD_TEMP_MASK) * 125 - 49000; + break; + default: +- return -EOPNOTSUPP; ++ return ret; + } + break; + case hwmon_temp_max: +@@ -243,7 +249,7 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel, + - ((regval >> 24) & 0xf)) * 500 + 52000; + break; + default: +- return -EOPNOTSUPP; ++ return ret; + } + return 0; + } +@@ -381,8 +387,20 @@ static void k10temp_get_ccd_support(struct pci_dev *pdev, + int i; + + for (i = 0; i < limit; i++) { +- amd_smn_read(amd_pci_dev_to_node_id(pdev), +- ZEN_CCD_TEMP(data->ccd_offset, i), ®val); ++ /* ++ * Ignore inaccessible CCDs. ++ * ++ * Some systems will return a register value of 0, and the TEMP_VALID ++ * bit check below will naturally fail. ++ * ++ * Other systems will return a PCI_ERROR_RESPONSE (0xFFFFFFFF) for ++ * the register value. And this will incorrectly pass the TEMP_VALID ++ * bit check. ++ */ ++ if (amd_smn_read(amd_pci_dev_to_node_id(pdev), ++ ZEN_CCD_TEMP(data->ccd_offset, i), ®val)) ++ continue; ++ + if (regval & ZEN_CCD_TEMP_VALID) + data->show_temp |= BIT(TCCD_BIT(i)); + } +diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c +index ada694ba9f958..f279dd010b73e 100644 +--- a/drivers/hwspinlock/hwspinlock_core.c ++++ b/drivers/hwspinlock/hwspinlock_core.c +@@ -302,6 +302,34 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags) + } + EXPORT_SYMBOL_GPL(__hwspin_unlock); + ++/** ++ * hwspin_lock_bust() - bust a specific hwspinlock ++ * @hwlock: a previously-acquired hwspinlock which we want to bust ++ * @id: identifier of the remote lock holder, if applicable ++ * ++ * This function will bust a hwspinlock that was previously acquired as ++ * long as the current owner of the lock matches the id given by the caller. ++ * ++ * Context: Process context. ++ * ++ * Returns: 0 on success, or -EINVAL if the hwspinlock does not exist, or ++ * the bust operation fails, and -EOPNOTSUPP if the bust operation is not ++ * defined for the hwspinlock. 
++ */ ++int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id) ++{ ++ if (WARN_ON(!hwlock)) ++ return -EINVAL; ++ ++ if (!hwlock->bank->ops->bust) { ++ pr_err("bust operation not defined\n"); ++ return -EOPNOTSUPP; ++ } ++ ++ return hwlock->bank->ops->bust(hwlock, id); ++} ++EXPORT_SYMBOL_GPL(hwspin_lock_bust); ++ + /** + * of_hwspin_lock_simple_xlate - translate hwlock_spec to return a lock id + * @bank: the hwspinlock device bank +diff --git a/drivers/hwspinlock/hwspinlock_internal.h b/drivers/hwspinlock/hwspinlock_internal.h +index 29892767bb7a0..f298fc0ee5adb 100644 +--- a/drivers/hwspinlock/hwspinlock_internal.h ++++ b/drivers/hwspinlock/hwspinlock_internal.h +@@ -21,6 +21,8 @@ struct hwspinlock_device; + * @trylock: make a single attempt to take the lock. returns 0 on + * failure and true on success. may _not_ sleep. + * @unlock: release the lock. always succeed. may _not_ sleep. ++ * @bust: optional, platform-specific bust handler, called by hwspinlock ++ * core to bust a specific lock. + * @relax: optional, platform-specific relax handler, called by hwspinlock + * core while spinning on a lock, between two successive + * invocations of @trylock. may _not_ sleep. +@@ -28,6 +30,7 @@ struct hwspinlock_device; + struct hwspinlock_ops { + int (*trylock)(struct hwspinlock *lock); + void (*unlock)(struct hwspinlock *lock); ++ int (*bust)(struct hwspinlock *lock, unsigned int id); + void (*relax)(struct hwspinlock *lock); + }; + +diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c +index 5e1a85ca12119..121bde49ccb7d 100644 +--- a/drivers/iio/industrialio-core.c ++++ b/drivers/iio/industrialio-core.c +@@ -752,9 +752,11 @@ static ssize_t iio_read_channel_info(struct device *dev, + INDIO_MAX_RAW_ELEMENTS, + vals, &val_len, + this_attr->address); +- else ++ else if (indio_dev->info->read_raw) + ret = indio_dev->info->read_raw(indio_dev, this_attr->c, + &vals[0], &vals[1], this_attr->address); ++ else ++ return -EINVAL; + + if (ret < 0) + return ret; +@@ -836,6 +838,9 @@ static ssize_t iio_read_channel_info_avail(struct device *dev, + int length; + int type; + ++ if (!indio_dev->info->read_avail) ++ return -EINVAL; ++ + ret = indio_dev->info->read_avail(indio_dev, this_attr->c, + &vals, &type, &length, + this_attr->address); +diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c +index 19f7a91157ee4..f67e4afa5f94b 100644 +--- a/drivers/iio/industrialio-event.c ++++ b/drivers/iio/industrialio-event.c +@@ -285,6 +285,9 @@ static ssize_t iio_ev_state_store(struct device *dev, + if (ret < 0) + return ret; + ++ if (!indio_dev->info->write_event_config) ++ return -EINVAL; ++ + ret = indio_dev->info->write_event_config(indio_dev, + this_attr->c, iio_ev_attr_type(this_attr), + iio_ev_attr_dir(this_attr), val); +@@ -300,6 +303,9 @@ static ssize_t iio_ev_state_show(struct device *dev, + struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); + int val; + ++ if (!indio_dev->info->read_event_config) ++ return -EINVAL; ++ + val = indio_dev->info->read_event_config(indio_dev, + this_attr->c, iio_ev_attr_type(this_attr), + iio_ev_attr_dir(this_attr)); +@@ -318,6 +324,9 @@ static ssize_t iio_ev_value_show(struct device *dev, + int val, val2, val_arr[2]; + int ret; + ++ if (!indio_dev->info->read_event_value) ++ return -EINVAL; ++ + ret = indio_dev->info->read_event_value(indio_dev, + this_attr->c, iio_ev_attr_type(this_attr), + iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr), +diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c +index 
7a1f6713318a3..b855565384757 100644 +--- a/drivers/iio/inkern.c ++++ b/drivers/iio/inkern.c +@@ -562,6 +562,7 @@ EXPORT_SYMBOL_GPL(devm_iio_channel_get_all); + static int iio_channel_read(struct iio_channel *chan, int *val, int *val2, + enum iio_chan_info_enum info) + { ++ const struct iio_info *iio_info = chan->indio_dev->info; + int unused; + int vals[INDIO_MAX_RAW_ELEMENTS]; + int ret; +@@ -573,15 +574,18 @@ static int iio_channel_read(struct iio_channel *chan, int *val, int *val2, + if (!iio_channel_has_info(chan->channel, info)) + return -EINVAL; + +- if (chan->indio_dev->info->read_raw_multi) { +- ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev, +- chan->channel, INDIO_MAX_RAW_ELEMENTS, +- vals, &val_len, info); ++ if (iio_info->read_raw_multi) { ++ ret = iio_info->read_raw_multi(chan->indio_dev, ++ chan->channel, ++ INDIO_MAX_RAW_ELEMENTS, ++ vals, &val_len, info); + *val = vals[0]; + *val2 = vals[1]; ++ } else if (iio_info->read_raw) { ++ ret = iio_info->read_raw(chan->indio_dev, ++ chan->channel, val, val2, info); + } else { +- ret = chan->indio_dev->info->read_raw(chan->indio_dev, +- chan->channel, val, val2, info); ++ return -EINVAL; + } + + return ret; +@@ -801,11 +805,15 @@ static int iio_channel_read_avail(struct iio_channel *chan, + const int **vals, int *type, int *length, + enum iio_chan_info_enum info) + { ++ const struct iio_info *iio_info = chan->indio_dev->info; ++ + if (!iio_channel_has_available(chan->channel, info)) + return -EINVAL; + +- return chan->indio_dev->info->read_avail(chan->indio_dev, chan->channel, +- vals, type, length, info); ++ if (iio_info->read_avail) ++ return iio_info->read_avail(chan->indio_dev, chan->channel, ++ vals, type, length, info); ++ return -EINVAL; + } + + int iio_read_avail_channel_attribute(struct iio_channel *chan, +@@ -995,8 +1003,12 @@ EXPORT_SYMBOL_GPL(iio_get_channel_type); + static int iio_channel_write(struct iio_channel *chan, int val, int val2, + enum iio_chan_info_enum info) + { +- return chan->indio_dev->info->write_raw(chan->indio_dev, +- chan->channel, val, val2, info); ++ const struct iio_info *iio_info = chan->indio_dev->info; ++ ++ if (iio_info->write_raw) ++ return iio_info->write_raw(chan->indio_dev, ++ chan->channel, val, val2, info); ++ return -EINVAL; + } + + int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2, +diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c +index 16a24a05fc2a6..bafd210dd43e8 100644 +--- a/drivers/infiniband/hw/efa/efa_com.c ++++ b/drivers/infiniband/hw/efa/efa_com.c +@@ -1,6 +1,6 @@ + // SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause + /* +- * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved. ++ * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved. 
+ */ + + #include "efa_com.h" +@@ -406,8 +406,8 @@ static struct efa_comp_ctx *efa_com_submit_admin_cmd(struct efa_com_admin_queue + return comp_ctx; + } + +-static void efa_com_handle_single_admin_completion(struct efa_com_admin_queue *aq, +- struct efa_admin_acq_entry *cqe) ++static int efa_com_handle_single_admin_completion(struct efa_com_admin_queue *aq, ++ struct efa_admin_acq_entry *cqe) + { + struct efa_comp_ctx *comp_ctx; + u16 cmd_id; +@@ -416,11 +416,11 @@ static void efa_com_handle_single_admin_completion(struct efa_com_admin_queue *a + EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID); + + comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, false); +- if (!comp_ctx) { ++ if (comp_ctx->status != EFA_CMD_SUBMITTED) { + ibdev_err(aq->efa_dev, +- "comp_ctx is NULL. Changing the admin queue running state\n"); +- clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state); +- return; ++ "Received completion with unexpected command id[%d], sq producer: %d, sq consumer: %d, cq consumer: %d\n", ++ cmd_id, aq->sq.pc, aq->sq.cc, aq->cq.cc); ++ return -EINVAL; + } + + comp_ctx->status = EFA_CMD_COMPLETED; +@@ -428,14 +428,17 @@ static void efa_com_handle_single_admin_completion(struct efa_com_admin_queue *a + + if (!test_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state)) + complete(&comp_ctx->wait_event); ++ ++ return 0; + } + + static void efa_com_handle_admin_completion(struct efa_com_admin_queue *aq) + { + struct efa_admin_acq_entry *cqe; + u16 queue_size_mask; +- u16 comp_num = 0; ++ u16 comp_cmds = 0; + u8 phase; ++ int err; + u16 ci; + + queue_size_mask = aq->depth - 1; +@@ -453,10 +456,12 @@ static void efa_com_handle_admin_completion(struct efa_com_admin_queue *aq) + * phase bit was validated + */ + dma_rmb(); +- efa_com_handle_single_admin_completion(aq, cqe); ++ err = efa_com_handle_single_admin_completion(aq, cqe); ++ if (!err) ++ comp_cmds++; + ++ aq->cq.cc++; + ci++; +- comp_num++; + if (ci == aq->depth) { + ci = 0; + phase = !phase; +@@ -465,10 +470,9 @@ static void efa_com_handle_admin_completion(struct efa_com_admin_queue *aq) + cqe = &aq->cq.entries[ci]; + } + +- aq->cq.cc += comp_num; + aq->cq.phase = phase; +- aq->sq.cc += comp_num; +- atomic64_add(comp_num, &aq->stats.completed_cmd); ++ aq->sq.cc += comp_cmds; ++ atomic64_add(comp_cmds, &aq->stats.completed_cmd); + } + + static int efa_com_comp_status_to_errno(u8 comp_status) +diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c +index 68bf41147a619..04e7f58553db1 100644 +--- a/drivers/media/usb/uvc/uvc_driver.c ++++ b/drivers/media/usb/uvc/uvc_driver.c +@@ -687,16 +687,26 @@ static int uvc_parse_streaming(struct uvc_device *dev, + goto error; + } + +- size = nformats * sizeof(*format) + nframes * sizeof(*frame) ++ /* ++ * Allocate memory for the formats, the frames and the intervals, ++ * plus any required padding to guarantee that everything has the ++ * correct alignment. 
++ */ ++ size = nformats * sizeof(*format); ++ size = ALIGN(size, __alignof__(*frame)) + nframes * sizeof(*frame); ++ size = ALIGN(size, __alignof__(*interval)) + + nintervals * sizeof(*interval); ++ + format = kzalloc(size, GFP_KERNEL); +- if (format == NULL) { ++ if (!format) { + ret = -ENOMEM; + goto error; + } + +- frame = (struct uvc_frame *)&format[nformats]; +- interval = (u32 *)&frame[nframes]; ++ frame = (void *)format + nformats * sizeof(*format); ++ frame = PTR_ALIGN(frame, __alignof__(*frame)); ++ interval = (void *)frame + nframes * sizeof(*frame); ++ interval = PTR_ALIGN(interval, __alignof__(*interval)); + + streaming->formats = format; + streaming->nformats = 0; +diff --git a/drivers/media/v4l2-core/v4l2-cci.c b/drivers/media/v4l2-core/v4l2-cci.c +index ee3475bed37fa..1ff94affbaf3a 100644 +--- a/drivers/media/v4l2-core/v4l2-cci.c ++++ b/drivers/media/v4l2-core/v4l2-cci.c +@@ -23,6 +23,15 @@ int cci_read(struct regmap *map, u32 reg, u64 *val, int *err) + u8 buf[8]; + int ret; + ++ /* ++ * TODO: Fix smatch. Assign *val to 0 here in order to avoid ++ * failing a smatch check on caller when the caller proceeds to ++ * read *val without initialising it on caller's side. *val is set ++ * to a valid value whenever this function returns 0 but smatch ++ * can't figure that out currently. ++ */ ++ *val = 0; ++ + if (err && *err) + return *err; + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +index 79ec6fcc9e259..57b0e26696e30 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +@@ -2369,7 +2369,8 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq + if (flush) + mlx5e_shampo_flush_skb(rq, cqe, match); + free_hd_entry: +- mlx5e_free_rx_shampo_hd_entry(rq, header_index); ++ if (likely(head_size)) ++ mlx5e_free_rx_shampo_hd_entry(rq, header_index); + mpwrq_cqe_out: + if (likely(wi->consumed_strides < rq->mpwqe.num_strides)) + return; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c +index 042ca03491243..d1db04baa1fa6 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c +@@ -7,7 +7,7 @@ + /* don't try to optimize STE allocation if the stack is too constaraining */ + #define DR_RULE_MAX_STES_OPTIMIZED 0 + #else +-#define DR_RULE_MAX_STES_OPTIMIZED 5 ++#define DR_RULE_MAX_STES_OPTIMIZED 2 + #endif + #define DR_RULE_MAX_STE_CHAIN_OPTIMIZED (DR_RULE_MAX_STES_OPTIMIZED + DR_ACTION_MAX_STES) + +diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c +index 7e6e1bed525af..9d724d228b831 100644 +--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c ++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c +@@ -234,7 +234,7 @@ static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq) + name = dev_name(dev); + + snprintf(intr->name, sizeof(intr->name), +- "%s-%s-%s", IONIC_DRV_NAME, name, q->name); ++ "%.5s-%.16s-%.8s", IONIC_DRV_NAME, name, q->name); + + return devm_request_irq(dev, intr->vector, ionic_isr, + 0, intr->name, &qcq->napi); +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c +index b1380cf1b13ab..92c1500fa7c44 100644 +--- a/drivers/net/usb/qmi_wwan.c ++++ b/drivers/net/usb/qmi_wwan.c +@@ -1438,6 +1438,7 @@ static const struct usb_device_id products[] = { + 
{QMI_QUIRK_SET_DTR(0x1546, 0x1312, 4)}, /* u-blox LARA-R6 01B */ + {QMI_QUIRK_SET_DTR(0x1546, 0x1342, 4)}, /* u-blox LARA-L6 */ + {QMI_QUIRK_SET_DTR(0x33f8, 0x0104, 4)}, /* Rolling RW101 RMNET */ ++ {QMI_FIXED_INTF(0x2dee, 0x4d22, 5)}, /* MeiG Smart SRM825L */ + + /* 4. Gobi 1000 devices */ + {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c +index 51ade909c84f0..bc01f2dafa948 100644 +--- a/drivers/net/virtio_net.c ++++ b/drivers/net/virtio_net.c +@@ -2140,7 +2140,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget, + return packets; + } + +-static void virtnet_poll_cleantx(struct receive_queue *rq) ++static void virtnet_poll_cleantx(struct receive_queue *rq, int budget) + { + struct virtnet_info *vi = rq->vq->vdev->priv; + unsigned int index = vq2rxq(rq->vq); +@@ -2158,7 +2158,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq) + + do { + virtqueue_disable_cb(sq->vq); +- free_old_xmit_skbs(sq, true); ++ free_old_xmit_skbs(sq, !!budget); + } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq))); + + if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) +@@ -2177,7 +2177,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget) + unsigned int received; + unsigned int xdp_xmit = 0; + +- virtnet_poll_cleantx(rq); ++ virtnet_poll_cleantx(rq, budget); + + received = virtnet_receive(rq, budget, &xdp_xmit); + +@@ -2280,7 +2280,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget) + txq = netdev_get_tx_queue(vi->dev, index); + __netif_tx_lock(txq, raw_smp_processor_id()); + virtqueue_disable_cb(sq->vq); +- free_old_xmit_skbs(sq, true); ++ free_old_xmit_skbs(sq, !!budget); + + if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) + netif_tx_wake_queue(txq); +diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c +index a831d9474e9e0..83dc284392de2 100644 +--- a/drivers/net/wireless/ath/ath11k/qmi.c ++++ b/drivers/net/wireless/ath/ath11k/qmi.c +@@ -2293,7 +2293,7 @@ static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab, + struct qmi_txn txn; + const u8 *temp = data; + void __iomem *bdf_addr = NULL; +- int ret; ++ int ret = 0; + u32 remaining = len; + + req = kzalloc(sizeof(*req), GFP_KERNEL); +diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c +index f1379a5e60cdd..c49f585cc3965 100644 +--- a/drivers/net/wireless/ath/ath12k/qmi.c ++++ b/drivers/net/wireless/ath/ath12k/qmi.c +@@ -2312,7 +2312,7 @@ static int ath12k_qmi_load_file_target_mem(struct ath12k_base *ab, + struct qmi_wlanfw_bdf_download_resp_msg_v01 resp; + struct qmi_txn txn = {}; + const u8 *temp = data; +- int ret; ++ int ret = 0; + u32 remaining = len; + + req = kzalloc(sizeof(*req), GFP_KERNEL); +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c +index 3356e36e2af73..0b71a71ca240b 100644 +--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c ++++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c +@@ -230,8 +230,7 @@ static ssize_t iwl_dbgfs_send_hcmd_write(struct iwl_fw_runtime *fwrt, char *buf, + .data = { NULL, }, + }; + +- if (fwrt->ops && fwrt->ops->fw_running && +- !fwrt->ops->fw_running(fwrt->ops_ctx)) ++ if (!iwl_trans_fw_running(fwrt->trans)) + return -EIO; + + if (count < header_size + 1 || count > 1024 * 4) +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +index 702586945533e..5812b58c92b06 100644 +--- 
a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h ++++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +@@ -18,7 +18,6 @@ + struct iwl_fw_runtime_ops { + void (*dump_start)(void *ctx); + void (*dump_end)(void *ctx); +- bool (*fw_running)(void *ctx); + int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd); + bool (*d3_debug_enable)(void *ctx); + }; +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +index 5336a4afde4d2..945524470a1e9 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +@@ -702,11 +702,6 @@ static void iwl_mvm_fwrt_dump_end(void *ctx) + mutex_unlock(&mvm->mutex); + } + +-static bool iwl_mvm_fwrt_fw_running(void *ctx) +-{ +- return iwl_mvm_firmware_running(ctx); +-} +- + static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd) + { + struct iwl_mvm *mvm = (struct iwl_mvm *)ctx; +@@ -727,7 +722,6 @@ static bool iwl_mvm_d3_debug_enable(void *ctx) + static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = { + .dump_start = iwl_mvm_fwrt_dump_start, + .dump_end = iwl_mvm_fwrt_dump_end, +- .fw_running = iwl_mvm_fwrt_fw_running, + .send_hcmd = iwl_mvm_fwrt_send_hcmd, + .d3_debug_enable = iwl_mvm_d3_debug_enable, + }; +diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c +index c1644353053f0..01b17b8f4ff9d 100644 +--- a/drivers/net/wireless/realtek/rtw89/ser.c ++++ b/drivers/net/wireless/realtek/rtw89/ser.c +@@ -308,9 +308,13 @@ static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) + + static void ser_sta_deinit_cam_iter(void *data, struct ieee80211_sta *sta) + { +- struct rtw89_vif *rtwvif = (struct rtw89_vif *)data; +- struct rtw89_dev *rtwdev = rtwvif->rtwdev; ++ struct rtw89_vif *target_rtwvif = (struct rtw89_vif *)data; + struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; ++ struct rtw89_vif *rtwvif = rtwsta->rtwvif; ++ struct rtw89_dev *rtwdev = rtwvif->rtwdev; ++ ++ if (rtwvif != target_rtwvif) ++ return; + + if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE || sta->tdls) + rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam); +diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c +index b8cb77c9c4bd2..3132b27bc0064 100644 +--- a/drivers/pci/controller/dwc/pcie-al.c ++++ b/drivers/pci/controller/dwc/pcie-al.c +@@ -242,18 +242,24 @@ static struct pci_ops al_child_pci_ops = { + .write = pci_generic_config_write, + }; + +-static void al_pcie_config_prepare(struct al_pcie *pcie) ++static int al_pcie_config_prepare(struct al_pcie *pcie) + { + struct al_pcie_target_bus_cfg *target_bus_cfg; + struct dw_pcie_rp *pp = &pcie->pci->pp; + unsigned int ecam_bus_mask; ++ struct resource_entry *ft; + u32 cfg_control_offset; ++ struct resource *bus; + u8 subordinate_bus; + u8 secondary_bus; + u32 cfg_control; + u32 reg; +- struct resource *bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS)->res; + ++ ft = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS); ++ if (!ft) ++ return -ENODEV; ++ ++ bus = ft->res; + target_bus_cfg = &pcie->target_bus_cfg; + + ecam_bus_mask = (pcie->ecam_size >> PCIE_ECAM_BUS_SHIFT) - 1; +@@ -287,6 +293,8 @@ static void al_pcie_config_prepare(struct al_pcie *pcie) + FIELD_PREP(CFG_CONTROL_SEC_BUS_MASK, secondary_bus); + + al_pcie_controller_writel(pcie, cfg_control_offset, reg); ++ ++ return 0; + } + + static int al_pcie_host_init(struct dw_pcie_rp *pp) +@@ -305,7 +313,9 @@ static int 
al_pcie_host_init(struct dw_pcie_rp *pp) + if (rc) + return rc; + +- al_pcie_config_prepare(pcie); ++ rc = al_pcie_config_prepare(pcie); ++ if (rc) ++ return rc; + + return 0; + } +diff --git a/drivers/platform/chrome/cros_ec_lpc_mec.c b/drivers/platform/chrome/cros_ec_lpc_mec.c +index 0d9c79b270ce2..63b6b261b8e58 100644 +--- a/drivers/platform/chrome/cros_ec_lpc_mec.c ++++ b/drivers/platform/chrome/cros_ec_lpc_mec.c +@@ -10,13 +10,65 @@ + + #include "cros_ec_lpc_mec.h" + ++#define ACPI_LOCK_DELAY_MS 500 ++ + /* + * This mutex must be held while accessing the EMI unit. We can't rely on the + * EC mutex because memmap data may be accessed without it being held. + */ + static DEFINE_MUTEX(io_mutex); ++/* ++ * An alternative mutex to be used when the ACPI AML code may also ++ * access memmap data. When set, this mutex is used in preference to ++ * io_mutex. ++ */ ++static acpi_handle aml_mutex; ++ + static u16 mec_emi_base, mec_emi_end; + ++/** ++ * cros_ec_lpc_mec_lock() - Acquire mutex for EMI ++ * ++ * @return: Negative error code, or zero for success ++ */ ++static int cros_ec_lpc_mec_lock(void) ++{ ++ bool success; ++ ++ if (!aml_mutex) { ++ mutex_lock(&io_mutex); ++ return 0; ++ } ++ ++ success = ACPI_SUCCESS(acpi_acquire_mutex(aml_mutex, ++ NULL, ACPI_LOCK_DELAY_MS)); ++ if (!success) ++ return -EBUSY; ++ ++ return 0; ++} ++ ++/** ++ * cros_ec_lpc_mec_unlock() - Release mutex for EMI ++ * ++ * @return: Negative error code, or zero for success ++ */ ++static int cros_ec_lpc_mec_unlock(void) ++{ ++ bool success; ++ ++ if (!aml_mutex) { ++ mutex_unlock(&io_mutex); ++ return 0; ++ } ++ ++ success = ACPI_SUCCESS(acpi_release_mutex(aml_mutex, NULL)); ++ if (!success) ++ return -EBUSY; ++ ++ return 0; ++} ++ + /** + * cros_ec_lpc_mec_emi_write_address() - Initialize EMI at a given address. 
+ * +@@ -77,6 +129,7 @@ u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type, + int io_addr; + u8 sum = 0; + enum cros_ec_lpc_mec_emi_access_mode access, new_access; ++ int ret; + + /* Return checksum of 0 if window is not initialized */ + WARN_ON(mec_emi_base == 0 || mec_emi_end == 0); +@@ -92,7 +145,9 @@ u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type, + else + access = ACCESS_TYPE_LONG_AUTO_INCREMENT; + +- mutex_lock(&io_mutex); ++ ret = cros_ec_lpc_mec_lock(); ++ if (ret) ++ return ret; + + /* Initialize I/O at desired address */ + cros_ec_lpc_mec_emi_write_address(offset, access); +@@ -134,7 +189,9 @@ u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type, + } + + done: +- mutex_unlock(&io_mutex); ++ ret = cros_ec_lpc_mec_unlock(); ++ if (ret) ++ return ret; + + return sum; + } +@@ -146,3 +203,18 @@ void cros_ec_lpc_mec_init(unsigned int base, unsigned int end) + mec_emi_end = end; + } + EXPORT_SYMBOL(cros_ec_lpc_mec_init); ++ ++int cros_ec_lpc_mec_acpi_mutex(struct acpi_device *adev, const char *pathname) ++{ ++ int status; ++ ++ if (!adev) ++ return -ENOENT; ++ ++ status = acpi_get_handle(adev->handle, pathname, &aml_mutex); ++ if (ACPI_FAILURE(status)) ++ return -ENOENT; ++ ++ return 0; ++} ++EXPORT_SYMBOL(cros_ec_lpc_mec_acpi_mutex); +diff --git a/drivers/platform/chrome/cros_ec_lpc_mec.h b/drivers/platform/chrome/cros_ec_lpc_mec.h +index 9d0521b23e8ae..3f3af37e58a50 100644 +--- a/drivers/platform/chrome/cros_ec_lpc_mec.h ++++ b/drivers/platform/chrome/cros_ec_lpc_mec.h +@@ -8,6 +8,8 @@ + #ifndef __CROS_EC_LPC_MEC_H + #define __CROS_EC_LPC_MEC_H + ++#include ++ + enum cros_ec_lpc_mec_emi_access_mode { + /* 8-bit access */ + ACCESS_TYPE_BYTE = 0x0, +@@ -45,6 +47,15 @@ enum cros_ec_lpc_mec_io_type { + */ + void cros_ec_lpc_mec_init(unsigned int base, unsigned int end); + ++/** ++ * cros_ec_lpc_mec_acpi_mutex() - Find and set ACPI mutex for MEC ++ * ++ * @adev: Parent ACPI device ++ * @pathname: Name of AML mutex ++ * @return: Negative error code, or zero for success ++ */ ++int cros_ec_lpc_mec_acpi_mutex(struct acpi_device *adev, const char *pathname); ++ + /** + * cros_ec_lpc_mec_in_range() - Determine if addresses are in MEC EMI range. + * +diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c +index d4a89d2bb43bb..2e8568d6cde94 100644 +--- a/drivers/soc/qcom/smem.c ++++ b/drivers/soc/qcom/smem.c +@@ -359,6 +359,32 @@ static struct qcom_smem *__smem; + /* Timeout (ms) for the trylock of remote spinlocks */ + #define HWSPINLOCK_TIMEOUT 1000 + ++/* The qcom hwspinlock id is always plus one from the smem host id */ ++#define SMEM_HOST_ID_TO_HWSPINLOCK_ID(__x) ((__x) + 1) ++ ++/** ++ * qcom_smem_bust_hwspin_lock_by_host() - bust the smem hwspinlock for a host ++ * @host: remote processor id ++ * ++ * Busts the hwspin_lock for the given smem host id. This helper is intended ++ * for remoteproc drivers that manage remoteprocs with an equivalent smem ++ * driver instance in the remote firmware. Drivers can force a release of the ++ * smem hwspin_lock if the rproc unexpectedly goes into a bad state. ++ * ++ * Context: Process context. ++ * ++ * Returns: 0 on success, otherwise negative errno. 
++ */ ++int qcom_smem_bust_hwspin_lock_by_host(unsigned int host) ++{ ++ /* This function is for remote procs, so ignore SMEM_HOST_APPS */ ++ if (host == SMEM_HOST_APPS || host >= SMEM_HOST_COUNT) ++ return -EINVAL; ++ ++ return hwspin_lock_bust(__smem->hwlock, SMEM_HOST_ID_TO_HWSPINLOCK_ID(host)); ++} ++EXPORT_SYMBOL_GPL(qcom_smem_bust_hwspin_lock_by_host); ++ + /** + * qcom_smem_is_available() - Check if SMEM is available + * +diff --git a/drivers/spi/spi-hisi-kunpeng.c b/drivers/spi/spi-hisi-kunpeng.c +index 77e9738e42f60..6910b4d4c427b 100644 +--- a/drivers/spi/spi-hisi-kunpeng.c ++++ b/drivers/spi/spi-hisi-kunpeng.c +@@ -495,6 +495,7 @@ static int hisi_spi_probe(struct platform_device *pdev) + host->transfer_one = hisi_spi_transfer_one; + host->handle_err = hisi_spi_handle_err; + host->dev.fwnode = dev->fwnode; ++ host->min_speed_hz = DIV_ROUND_UP(host->max_speed_hz, CLK_DIV_MAX); + + hisi_spi_hw_init(hs); + +diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c +index 94edac17b95f8..ad0ef5b6b8cf9 100644 +--- a/drivers/ufs/core/ufshcd.c ++++ b/drivers/ufs/core/ufshcd.c +@@ -2281,7 +2281,17 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba) + return err; + } + ++ /* ++ * The UFSHCI 3.0 specification does not define MCQ_SUPPORT and ++ * LSDB_SUPPORT, but [31:29] as reserved bits with reset value 0s, which ++ * means we can simply read values regardless of version. ++ */ + hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities); ++ /* ++ * 0h: legacy single doorbell support is available ++ * 1h: indicate that legacy single doorbell support has been removed ++ */ ++ hba->lsdb_sup = !FIELD_GET(MASK_LSDB_SUPPORT, hba->capabilities); + if (!hba->mcq_sup) + return 0; + +@@ -6457,7 +6467,8 @@ static void ufshcd_err_handler(struct work_struct *work) + if (ufshcd_err_handling_should_stop(hba)) + goto skip_err_handling; + +- if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { ++ if ((hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) && ++ !hba->force_reset) { + bool ret; + + spin_unlock_irqrestore(hba->host->host_lock, flags); +@@ -10386,6 +10397,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) + } + + if (!is_mcq_supported(hba)) { ++ if (!hba->lsdb_sup) { ++ dev_err(hba->dev, "%s: failed to initialize (legacy doorbell mode not supported)\n", ++ __func__); ++ err = -EINVAL; ++ goto out_disable; ++ } + err = scsi_add_host(host, hba->dev); + if (err) { + dev_err(hba->dev, "scsi_add_host failed\n"); +diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h +index 2a886e58cd632..42c60eba5fb6e 100644 +--- a/drivers/usb/typec/ucsi/ucsi.h ++++ b/drivers/usb/typec/ucsi/ucsi.h +@@ -404,7 +404,7 @@ ucsi_register_displayport(struct ucsi_connector *con, + bool override, int offset, + struct typec_altmode_desc *desc) + { +- return NULL; ++ return typec_port_register_altmode(con->port, desc); + } + + static inline void +diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c +index fc01b31bbb875..6338d818bc8bc 100644 +--- a/drivers/usb/usbip/stub_rx.c ++++ b/drivers/usb/usbip/stub_rx.c +@@ -144,53 +144,62 @@ static int tweak_set_configuration_cmd(struct urb *urb) + if (err && err != -ENODEV) + dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n", + config, err); +- return 0; ++ return err; + } + + static int tweak_reset_device_cmd(struct urb *urb) + { + struct stub_priv *priv = (struct stub_priv *) urb->context; + struct stub_device *sdev = priv->sdev; ++ int err; + + 
dev_info(&urb->dev->dev, "usb_queue_reset_device\n"); + +- if (usb_lock_device_for_reset(sdev->udev, NULL) < 0) { ++ err = usb_lock_device_for_reset(sdev->udev, NULL); ++ if (err < 0) { + dev_err(&urb->dev->dev, "could not obtain lock to reset device\n"); +- return 0; ++ return err; + } +- usb_reset_device(sdev->udev); ++ err = usb_reset_device(sdev->udev); + usb_unlock_device(sdev->udev); + +- return 0; ++ return err; + } + + /* + * clear_halt, set_interface, and set_configuration require special tricks. ++ * Returns 1 if request was tweaked, 0 otherwise. + */ +-static void tweak_special_requests(struct urb *urb) ++static int tweak_special_requests(struct urb *urb) + { ++ int err; ++ + if (!urb || !urb->setup_packet) +- return; ++ return 0; + + if (usb_pipetype(urb->pipe) != PIPE_CONTROL) +- return; ++ return 0; + + if (is_clear_halt_cmd(urb)) + /* tweak clear_halt */ +- tweak_clear_halt_cmd(urb); ++ err = tweak_clear_halt_cmd(urb); + + else if (is_set_interface_cmd(urb)) + /* tweak set_interface */ +- tweak_set_interface_cmd(urb); ++ err = tweak_set_interface_cmd(urb); + + else if (is_set_configuration_cmd(urb)) + /* tweak set_configuration */ +- tweak_set_configuration_cmd(urb); ++ err = tweak_set_configuration_cmd(urb); + + else if (is_reset_device_cmd(urb)) +- tweak_reset_device_cmd(urb); +- else ++ err = tweak_reset_device_cmd(urb); ++ else { + usbip_dbg_stub_rx("no need to tweak\n"); ++ return 0; ++ } ++ ++ return !err; + } + + /* +@@ -468,6 +477,7 @@ static void stub_recv_cmd_submit(struct stub_device *sdev, + int support_sg = 1; + int np = 0; + int ret, i; ++ int is_tweaked; + + if (pipe == -1) + return; +@@ -580,8 +590,11 @@ static void stub_recv_cmd_submit(struct stub_device *sdev, + priv->urbs[i]->pipe = pipe; + priv->urbs[i]->complete = stub_complete; + +- /* no need to submit an intercepted request, but harmless? */ +- tweak_special_requests(priv->urbs[i]); ++ /* ++ * all URBs belong to a single PDU, so a global is_tweaked flag is ++ * enough ++ */ ++ is_tweaked = tweak_special_requests(priv->urbs[i]); + + masking_bogus_flags(priv->urbs[i]); + } +@@ -594,22 +607,32 @@ static void stub_recv_cmd_submit(struct stub_device *sdev, + + /* urb is now ready to submit */ + for (i = 0; i < priv->num_urbs; i++) { +- ret = usb_submit_urb(priv->urbs[i], GFP_KERNEL); ++ if (!is_tweaked) { ++ ret = usb_submit_urb(priv->urbs[i], GFP_KERNEL); + +- if (ret == 0) +- usbip_dbg_stub_rx("submit urb ok, seqnum %u\n", +- pdu->base.seqnum); +- else { +- dev_err(&udev->dev, "submit_urb error, %d\n", ret); +- usbip_dump_header(pdu); +- usbip_dump_urb(priv->urbs[i]); ++ if (ret == 0) ++ usbip_dbg_stub_rx("submit urb ok, seqnum %u\n", ++ pdu->base.seqnum); ++ else { ++ dev_err(&udev->dev, "submit_urb error, %d\n", ret); ++ usbip_dump_header(pdu); ++ usbip_dump_urb(priv->urbs[i]); + ++ /* ++ * Pessimistic. ++ * This connection will be discarded. ++ */ ++ usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT); ++ break; ++ } ++ } else { + /* +- * Pessimistic. +- * This connection will be discarded. ++ * An identical URB was already submitted in ++ * tweak_special_requests(). Skip submitting this URB to not ++ * duplicate the request. 
+ */
+- usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
+- break;
++ priv->urbs[i]->status = 0;
++ stub_complete(priv->urbs[i]);
+ }
+ }
+
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index 7ff61909648d7..46c1f74983956 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -1266,6 +1266,19 @@ static void extent_err(const struct extent_buffer *eb, int slot,
+ va_end(args);
+ }
+
++static bool is_valid_dref_root(u64 rootid)
++{
++ /*
++ * The following tree root objectids are allowed to have a data backref:
++ * - subvolume trees
++ * - data reloc tree
++ * - tree root
++ * For v1 space cache
++ */
++ return is_fstree(rootid) || rootid == BTRFS_DATA_RELOC_TREE_OBJECTID ||
++ rootid == BTRFS_ROOT_TREE_OBJECTID;
++}
++
+ static int check_extent_item(struct extent_buffer *leaf,
+ struct btrfs_key *key, int slot,
+ struct btrfs_key *prev_key)
+@@ -1418,6 +1431,8 @@ static int check_extent_item(struct extent_buffer *leaf,
+ struct btrfs_extent_data_ref *dref;
+ struct btrfs_shared_data_ref *sref;
+ u64 seq;
++ u64 dref_root;
++ u64 dref_objectid;
+ u64 dref_offset;
+ u64 inline_offset;
+ u8 inline_type;
+@@ -1461,11 +1476,26 @@ static int check_extent_item(struct extent_buffer *leaf,
+ */
+ case BTRFS_EXTENT_DATA_REF_KEY:
+ dref = (struct btrfs_extent_data_ref *)(&iref->offset);
++ dref_root = btrfs_extent_data_ref_root(leaf, dref);
++ dref_objectid = btrfs_extent_data_ref_objectid(leaf, dref);
+ dref_offset = btrfs_extent_data_ref_offset(leaf, dref);
+ seq = hash_extent_data_ref(
+ btrfs_extent_data_ref_root(leaf, dref),
+ btrfs_extent_data_ref_objectid(leaf, dref),
+ btrfs_extent_data_ref_offset(leaf, dref));
++ if (unlikely(!is_valid_dref_root(dref_root))) {
++ extent_err(leaf, slot,
++ "invalid data ref root value %llu",
++ dref_root);
++ return -EUCLEAN;
++ }
++ if (unlikely(dref_objectid < BTRFS_FIRST_FREE_OBJECTID ||
++ dref_objectid > BTRFS_LAST_FREE_OBJECTID)) {
++ extent_err(leaf, slot,
++ "invalid data ref objectid value %llu",
++ dref_objectid);
++ return -EUCLEAN;
++ }
+ if (unlikely(!IS_ALIGNED(dref_offset,
+ fs_info->sectorsize))) {
+ extent_err(leaf, slot,
+@@ -1601,6 +1631,8 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
+ return -EUCLEAN;
+ }
+ for (; ptr < end; ptr += sizeof(*dref)) {
++ u64 root;
++ u64 objectid;
+ u64 offset;
+
+ /*
+@@ -1608,7 +1640,22 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
+ * overflow from the leaf due to hash collisions. 
+ */
+ dref = (struct btrfs_extent_data_ref *)ptr;
++ root = btrfs_extent_data_ref_root(leaf, dref);
++ objectid = btrfs_extent_data_ref_objectid(leaf, dref);
+ offset = btrfs_extent_data_ref_offset(leaf, dref);
++ if (unlikely(!is_valid_dref_root(root))) {
++ extent_err(leaf, slot,
++ "invalid extent data backref root value %llu",
++ root);
++ return -EUCLEAN;
++ }
++ if (unlikely(objectid < BTRFS_FIRST_FREE_OBJECTID ||
++ objectid > BTRFS_LAST_FREE_OBJECTID)) {
++ extent_err(leaf, slot,
++ "invalid extent data backref objectid value %llu",
++ objectid);
++ return -EUCLEAN;
++ }
+ if (unlikely(!IS_ALIGNED(offset, leaf->fs_info->sectorsize))) {
+ extent_err(leaf, slot,
+ "invalid extent data backref offset, have %llu expect aligned to %u",
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 00eff023cd9d6..6371b295fba68 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -4148,7 +4148,7 @@ extern struct kmem_cache *f2fs_inode_entry_slab;
+ * inline.c
+ */
+ bool f2fs_may_inline_data(struct inode *inode);
+-bool f2fs_sanity_check_inline_data(struct inode *inode);
++bool f2fs_sanity_check_inline_data(struct inode *inode, struct page *ipage);
+ bool f2fs_may_inline_dentry(struct inode *inode);
+ void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
+ void f2fs_truncate_inline_inode(struct inode *inode,
+diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
+index 2cbe557f971e7..a3f8b4ed495ef 100644
+--- a/fs/f2fs/inline.c
++++ b/fs/f2fs/inline.c
+@@ -33,11 +33,29 @@ bool f2fs_may_inline_data(struct inode *inode)
+ return !f2fs_post_read_required(inode);
+ }
+
+-bool f2fs_sanity_check_inline_data(struct inode *inode)
++static bool inode_has_blocks(struct inode *inode, struct page *ipage)
++{
++ struct f2fs_inode *ri = F2FS_INODE(ipage);
++ int i;
++
++ if (F2FS_HAS_BLOCKS(inode))
++ return true;
++
++ for (i = 0; i < DEF_NIDS_PER_INODE; i++) {
++ if (ri->i_nid[i])
++ return true;
++ }
++ return false;
++}
++
++bool f2fs_sanity_check_inline_data(struct inode *inode, struct page *ipage)
+ {
+ if (!f2fs_has_inline_data(inode))
+ return false;
+
++ if (inode_has_blocks(inode, ipage))
++ return false;
++
+ if (!support_inline_data(inode))
+ return true;
+
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index 26e857fee631d..709b2f79872f2 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -346,7 +346,7 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
+ }
+ }
+
+- if (f2fs_sanity_check_inline_data(inode)) {
++ if (f2fs_sanity_check_inline_data(inode, node_page)) {
+ f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
+ __func__, inode->i_ino, inode->i_mode);
+ return false;
+diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
+index 892b1c44de531..299b6d6aaa795 100644
+--- a/fs/gfs2/quota.c
++++ b/fs/gfs2/quota.c
+@@ -75,9 +75,6 @@
+ #define GFS2_QD_HASH_SIZE BIT(GFS2_QD_HASH_SHIFT)
+ #define GFS2_QD_HASH_MASK (GFS2_QD_HASH_SIZE - 1)
+
+-#define QC_CHANGE 0
+-#define QC_SYNC 1
+-
+ /* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
+ /* -> sd_bitmap_lock */
+ static DEFINE_SPINLOCK(qd_lock);
+@@ -697,7 +694,7 @@ static int sort_qd(const void *a, const void *b)
+ return 0;
+ }
+
+-static void do_qc(struct gfs2_quota_data *qd, s64 change, int qc_type)
++static void do_qc(struct gfs2_quota_data *qd, s64 change)
+ {
+ struct gfs2_sbd *sdp = qd->qd_sbd;
+ struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
+@@ -722,18 +719,16 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change, int qc_type)
+ 
qd->qd_change = x; + spin_unlock(&qd_lock); + +- if (qc_type == QC_CHANGE) { +- if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) { +- qd_hold(qd); +- slot_hold(qd); +- } +- } else { ++ if (!x) { + gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags)); + clear_bit(QDF_CHANGE, &qd->qd_flags); + qc->qc_flags = 0; + qc->qc_id = 0; + slot_put(qd); + qd_put(qd); ++ } else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) { ++ qd_hold(qd); ++ slot_hold(qd); + } + + if (change < 0) /* Reset quiet flag if we freed some blocks */ +@@ -978,7 +973,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda) + if (error) + goto out_end_trans; + +- do_qc(qd, -qd->qd_change_sync, QC_SYNC); ++ do_qc(qd, -qd->qd_change_sync); + set_bit(QDF_REFRESH, &qd->qd_flags); + } + +@@ -1300,7 +1295,7 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change, + + if (qid_eq(qd->qd_id, make_kqid_uid(uid)) || + qid_eq(qd->qd_id, make_kqid_gid(gid))) { +- do_qc(qd, change, QC_CHANGE); ++ do_qc(qd, change); + } + } + } +diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c +index fc3ecb180ac53..b65261e0cae3a 100644 +--- a/fs/gfs2/util.c ++++ b/fs/gfs2/util.c +@@ -99,12 +99,12 @@ int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd, + */ + int gfs2_freeze_lock_shared(struct gfs2_sbd *sdp) + { ++ int flags = LM_FLAG_NOEXP | GL_EXACT; + int error; + +- error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, +- LM_FLAG_NOEXP | GL_EXACT, ++ error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, flags, + &sdp->sd_freeze_gh); +- if (error) ++ if (error && error != GLR_TRYFAILED) + fs_err(sdp, "can't lock the freeze glock: %d\n", error); + return error; + } +diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c +index 7974e91ffe134..b5d8f238fce42 100644 +--- a/fs/notify/fsnotify.c ++++ b/fs/notify/fsnotify.c +@@ -103,17 +103,13 @@ void fsnotify_sb_delete(struct super_block *sb) + * parent cares. Thus when an event happens on a child it can quickly tell + * if there is a need to find a parent and send the event to the parent. + */ +-void __fsnotify_update_child_dentry_flags(struct inode *inode) ++void fsnotify_set_children_dentry_flags(struct inode *inode) + { + struct dentry *alias; +- int watched; + + if (!S_ISDIR(inode->i_mode)) + return; + +- /* determine if the children should tell inode about their events */ +- watched = fsnotify_inode_watches_children(inode); +- + spin_lock(&inode->i_lock); + /* run all of the dentries associated with this inode. Since this is a + * directory, there damn well better only be one item on this list */ +@@ -129,10 +125,7 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode) + continue; + + spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED); +- if (watched) +- child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED; +- else +- child->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED; ++ child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED; + spin_unlock(&child->d_lock); + } + spin_unlock(&alias->d_lock); +@@ -140,6 +133,24 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode) + spin_unlock(&inode->i_lock); + } + ++/* ++ * Lazily clear false positive PARENT_WATCHED flag for child whose parent had ++ * stopped watching children. 
++ */ ++static void fsnotify_clear_child_dentry_flag(struct inode *pinode, ++ struct dentry *dentry) ++{ ++ spin_lock(&dentry->d_lock); ++ /* ++ * d_lock is a sufficient barrier to prevent observing a non-watched ++ * parent state from before the fsnotify_set_children_dentry_flags() ++ * or fsnotify_update_flags() call that had set PARENT_WATCHED. ++ */ ++ if (!fsnotify_inode_watches_children(pinode)) ++ dentry->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED; ++ spin_unlock(&dentry->d_lock); ++} ++ + /* Are inode/sb/mount interested in parent and name info with this event? */ + static bool fsnotify_event_needs_parent(struct inode *inode, struct mount *mnt, + __u32 mask) +@@ -208,7 +219,7 @@ int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, + p_inode = parent->d_inode; + p_mask = fsnotify_inode_watches_children(p_inode); + if (unlikely(parent_watched && !p_mask)) +- __fsnotify_update_child_dentry_flags(p_inode); ++ fsnotify_clear_child_dentry_flag(p_inode, dentry); + + /* + * Include parent/name in notification either if some notification +diff --git a/fs/notify/fsnotify.h b/fs/notify/fsnotify.h +index fde74eb333cc9..2b4267de86e6b 100644 +--- a/fs/notify/fsnotify.h ++++ b/fs/notify/fsnotify.h +@@ -74,7 +74,7 @@ static inline void fsnotify_clear_marks_by_sb(struct super_block *sb) + * update the dentry->d_flags of all of inode's children to indicate if inode cares + * about events that happen to its children. + */ +-extern void __fsnotify_update_child_dentry_flags(struct inode *inode); ++extern void fsnotify_set_children_dentry_flags(struct inode *inode); + + extern struct kmem_cache *fsnotify_mark_connector_cachep; + +diff --git a/fs/notify/mark.c b/fs/notify/mark.c +index c74ef947447d6..4be6e883d492f 100644 +--- a/fs/notify/mark.c ++++ b/fs/notify/mark.c +@@ -176,6 +176,24 @@ static void *__fsnotify_recalc_mask(struct fsnotify_mark_connector *conn) + return fsnotify_update_iref(conn, want_iref); + } + ++static bool fsnotify_conn_watches_children( ++ struct fsnotify_mark_connector *conn) ++{ ++ if (conn->type != FSNOTIFY_OBJ_TYPE_INODE) ++ return false; ++ ++ return fsnotify_inode_watches_children(fsnotify_conn_inode(conn)); ++} ++ ++static void fsnotify_conn_set_children_dentry_flags( ++ struct fsnotify_mark_connector *conn) ++{ ++ if (conn->type != FSNOTIFY_OBJ_TYPE_INODE) ++ return; ++ ++ fsnotify_set_children_dentry_flags(fsnotify_conn_inode(conn)); ++} ++ + /* + * Calculate mask of events for a list of marks. The caller must make sure + * connector and connector->obj cannot disappear under us. Callers achieve +@@ -184,15 +202,23 @@ static void *__fsnotify_recalc_mask(struct fsnotify_mark_connector *conn) + */ + void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn) + { ++ bool update_children; ++ + if (!conn) + return; + + spin_lock(&conn->lock); ++ update_children = !fsnotify_conn_watches_children(conn); + __fsnotify_recalc_mask(conn); ++ update_children &= fsnotify_conn_watches_children(conn); + spin_unlock(&conn->lock); +- if (conn->type == FSNOTIFY_OBJ_TYPE_INODE) +- __fsnotify_update_child_dentry_flags( +- fsnotify_conn_inode(conn)); ++ /* ++ * Set children's PARENT_WATCHED flags only if parent started watching. ++ * When parent stops watching, we clear false positive PARENT_WATCHED ++ * flags lazily in __fsnotify_parent(). 
++ */ ++ if (update_children) ++ fsnotify_conn_set_children_dentry_flags(conn); + } + + /* Free all connectors queued for freeing once SRCU period ends */ +diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c +index 28031c7ba6b19..15cbfec4c28c7 100644 +--- a/fs/smb/client/smb2inode.c ++++ b/fs/smb/client/smb2inode.c +@@ -950,7 +950,8 @@ int smb2_query_path_info(const unsigned int xid, + cmds[num_cmds++] = SMB2_OP_GET_REPARSE; + + oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, +- FILE_READ_ATTRIBUTES | FILE_READ_EA, ++ FILE_READ_ATTRIBUTES | ++ FILE_READ_EA | SYNCHRONIZE, + FILE_OPEN, create_options | + OPEN_REPARSE_POINT, ACL_NO_MODE); + cifs_get_readable_path(tcon, full_path, &cfile); +@@ -1258,7 +1259,8 @@ int smb2_query_reparse_point(const unsigned int xid, + cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path); + + cifs_get_readable_path(tcon, full_path, &cfile); +- oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, FILE_READ_ATTRIBUTES, ++ oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, ++ FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE, + FILE_OPEN, OPEN_REPARSE_POINT, ACL_NO_MODE); + rc = smb2_compound_op(xid, tcon, cifs_sb, + full_path, &oparms, &in_iov, +diff --git a/include/clocksource/timer-xilinx.h b/include/clocksource/timer-xilinx.h +index c0f56fe6d22ae..d116f18de899c 100644 +--- a/include/clocksource/timer-xilinx.h ++++ b/include/clocksource/timer-xilinx.h +@@ -41,7 +41,7 @@ struct regmap; + struct xilinx_timer_priv { + struct regmap *map; + struct clk *clk; +- u32 max; ++ u64 max; + }; + + /** +diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h +index c0892d75ce333..575415b513497 100644 +--- a/include/linux/fsnotify_backend.h ++++ b/include/linux/fsnotify_backend.h +@@ -563,12 +563,14 @@ static inline __u32 fsnotify_parent_needed_mask(__u32 mask) + + static inline int fsnotify_inode_watches_children(struct inode *inode) + { ++ __u32 parent_mask = READ_ONCE(inode->i_fsnotify_mask); ++ + /* FS_EVENT_ON_CHILD is set if the inode may care */ +- if (!(inode->i_fsnotify_mask & FS_EVENT_ON_CHILD)) ++ if (!(parent_mask & FS_EVENT_ON_CHILD)) + return 0; + /* this inode might care about child events, does it care about the + * specific set of events that can happen on a child? */ +- return inode->i_fsnotify_mask & FS_EVENTS_POSS_ON_CHILD; ++ return parent_mask & FS_EVENTS_POSS_ON_CHILD; + } + + /* +@@ -582,7 +584,7 @@ static inline void fsnotify_update_flags(struct dentry *dentry) + /* + * Serialisation of setting PARENT_WATCHED on the dentries is provided + * by d_lock. If inotify_inode_watched changes after we have taken +- * d_lock, the following __fsnotify_update_child_dentry_flags call will ++ * d_lock, the following fsnotify_set_children_dentry_flags call will + * find our entry, so it will spin until we complete here, and update + * us with the new state. 
+ */ +diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h +index bfe7c1f1ac6d1..f0231dbc47771 100644 +--- a/include/linux/hwspinlock.h ++++ b/include/linux/hwspinlock.h +@@ -68,6 +68,7 @@ int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int, + int __hwspin_trylock(struct hwspinlock *, int, unsigned long *); + void __hwspin_unlock(struct hwspinlock *, int, unsigned long *); + int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name); ++int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id); + int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock); + struct hwspinlock *devm_hwspin_lock_request(struct device *dev); + struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev, +@@ -127,6 +128,11 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags) + { + } + ++static inline int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id) ++{ ++ return 0; ++} ++ + static inline int of_hwspin_lock_get_id(struct device_node *np, int index) + { + return 0; +diff --git a/include/linux/i2c.h b/include/linux/i2c.h +index 0dae9db275380..32cf5708d5a5b 100644 +--- a/include/linux/i2c.h ++++ b/include/linux/i2c.h +@@ -1033,7 +1033,7 @@ static inline int of_i2c_get_board_info(struct device *dev, + struct acpi_resource; + struct acpi_resource_i2c_serialbus; + +-#if IS_ENABLED(CONFIG_ACPI) ++#if IS_REACHABLE(CONFIG_ACPI) && IS_REACHABLE(CONFIG_I2C) + bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares, + struct acpi_resource_i2c_serialbus **i2c); + int i2c_acpi_client_count(struct acpi_device *adev); +diff --git a/include/linux/soc/qcom/smem.h b/include/linux/soc/qcom/smem.h +index a36a3b9d4929e..03187bc958518 100644 +--- a/include/linux/soc/qcom/smem.h ++++ b/include/linux/soc/qcom/smem.h +@@ -14,4 +14,6 @@ phys_addr_t qcom_smem_virt_to_phys(void *p); + + int qcom_smem_get_soc_id(u32 *id); + ++int qcom_smem_bust_hwspin_lock_by_host(unsigned int host); ++ + #endif +diff --git a/include/net/ip.h b/include/net/ip.h +index 162cf2d2f841c..6f1ff4846451b 100644 +--- a/include/net/ip.h ++++ b/include/net/ip.h +@@ -497,8 +497,7 @@ static inline unsigned int ip_skb_dst_mtu(struct sock *sk, + return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu); + } + +-struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx, +- int fc_mx_len, ++struct dst_metrics *ip_fib_metrics_init(struct nlattr *fc_mx, int fc_mx_len, + struct netlink_ext_ack *extack); + static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics) + { +diff --git a/include/net/tcp.h b/include/net/tcp.h +index cc3b56bf19e05..c206ffaa8ed70 100644 +--- a/include/net/tcp.h ++++ b/include/net/tcp.h +@@ -1140,7 +1140,7 @@ extern struct tcp_congestion_ops tcp_reno; + + struct tcp_congestion_ops *tcp_ca_find(const char *name); + struct tcp_congestion_ops *tcp_ca_find_key(u32 key); +-u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca); ++u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca); + #ifdef CONFIG_INET + char *tcp_ca_get_name_by_key(u32 key, char *buffer); + #else +diff --git a/include/sound/ump_convert.h b/include/sound/ump_convert.h +index 28c364c63245d..d099ae27f8491 100644 +--- a/include/sound/ump_convert.h ++++ b/include/sound/ump_convert.h +@@ -13,6 +13,7 @@ struct ump_cvt_to_ump_bank { + unsigned char cc_nrpn_msb, cc_nrpn_lsb; + unsigned char cc_data_msb, cc_data_lsb; + unsigned char cc_bank_msb, cc_bank_lsb; ++ bool cc_data_msb_set, cc_data_lsb_set; + }; + + /* context 
for converting from MIDI1 byte stream to UMP packet */
+diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
+index e4da397360682..2a7d6f269d9e3 100644
+--- a/include/ufs/ufshcd.h
++++ b/include/ufs/ufshcd.h
+@@ -1064,6 +1064,7 @@ struct ufs_hba {
+ bool ext_iid_sup;
+ bool scsi_host_added;
+ bool mcq_sup;
++ bool lsdb_sup;
+ bool mcq_enabled;
+ struct ufshcd_res_info res[RES_MAX];
+ void __iomem *mcq_base;
+diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h
+index d5accacae6bca..ae93b30d25893 100644
+--- a/include/ufs/ufshci.h
++++ b/include/ufs/ufshci.h
+@@ -75,6 +75,7 @@ enum {
+ MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT = 0x02000000,
+ MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000,
+ MASK_CRYPTO_SUPPORT = 0x10000000,
++ MASK_LSDB_SUPPORT = 0x20000000,
+ MASK_MCQ_SUPPORT = 0x40000000,
+ };
+
+diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
+index 06366acd27b08..e472cc37d7de4 100644
+--- a/kernel/dma/debug.c
++++ b/kernel/dma/debug.c
+@@ -415,8 +415,11 @@ static unsigned long long phys_addr(struct dma_debug_entry *entry)
+ * dma_active_cacheline entry to track per event. dma_map_sg(), on the
+ * other hand, consumes a single dma_debug_entry, but inserts 'nents'
+ * entries into the tree.
++ *
++ * Use __GFP_NOWARN because the printk from an OOM, to netconsole, could end
++ * up right back in the DMA debugging code, leading to a deadlock.
+ */
+-static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC);
++static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC | __GFP_NOWARN);
+ static DEFINE_SPINLOCK(radix_lock);
+ #define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
+ #define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
+diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
+index e9821a8422dbe..9eb43b501ff5c 100644
+--- a/kernel/rcu/tree.h
++++ b/kernel/rcu/tree.h
+@@ -224,7 +224,6 @@ struct rcu_data {
+ struct swait_queue_head nocb_state_wq; /* For offloading state changes */
+ struct task_struct *nocb_gp_kthread;
+ raw_spinlock_t nocb_lock; /* Guard following pair of fields. */
+- atomic_t nocb_lock_contended; /* Contention experienced. */
+ int nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */
+ struct timer_list nocb_timer; /* Enforce finite deferral. */
+ unsigned long nocb_gp_adv_time; /* Last call_rcu() CB adv (jiffies). */
+diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
+index 2b24405b9cd2b..30b34f215ca35 100644
+--- a/kernel/rcu/tree_nocb.h
++++ b/kernel/rcu/tree_nocb.h
+@@ -91,8 +91,7 @@ module_param(nocb_nobypass_lim_per_jiffy, int, 0);
+
+ /*
+ * Acquire the specified rcu_data structure's ->nocb_bypass_lock. If the
+- * lock isn't immediately available, increment ->nocb_lock_contended to
+- * flag the contention.
++ * lock isn't immediately available, perform a minimal sanity check.
+ */
+ static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
+ __acquires(&rdp->nocb_bypass_lock)
+@@ -100,29 +99,12 @@ static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
+ lockdep_assert_irqs_disabled();
+ if (raw_spin_trylock(&rdp->nocb_bypass_lock))
+ return;
+- atomic_inc(&rdp->nocb_lock_contended);
++ /*
++ * Contention is expected only when a local enqueue collides with a
++ * remote flush from kthreads.
++ */
+ WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
+- smp_mb__after_atomic(); /* atomic_inc() before lock. */
+ raw_spin_lock(&rdp->nocb_bypass_lock);
+- smp_mb__before_atomic(); /* atomic_dec() after lock. 
*/ +- atomic_dec(&rdp->nocb_lock_contended); +-} +- +-/* +- * Spinwait until the specified rcu_data structure's ->nocb_lock is +- * not contended. Please note that this is extremely special-purpose, +- * relying on the fact that at most two kthreads and one CPU contend for +- * this lock, and also that the two kthreads are guaranteed to have frequent +- * grace-period-duration time intervals between successive acquisitions +- * of the lock. This allows us to use an extremely simple throttling +- * mechanism, and further to apply it only to the CPU doing floods of +- * call_rcu() invocations. Don't try this at home! +- */ +-static void rcu_nocb_wait_contended(struct rcu_data *rdp) +-{ +- WARN_ON_ONCE(smp_processor_id() != rdp->cpu); +- while (WARN_ON_ONCE(atomic_read(&rdp->nocb_lock_contended))) +- cpu_relax(); + } + + /* +@@ -510,7 +492,6 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp, + } + + // We need to use the bypass. +- rcu_nocb_wait_contended(rdp); + rcu_nocb_bypass_lock(rdp); + ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass); + rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */ +@@ -1668,12 +1649,11 @@ static void show_rcu_nocb_state(struct rcu_data *rdp) + + sprintf(bufw, "%ld", rsclp->gp_seq[RCU_WAIT_TAIL]); + sprintf(bufr, "%ld", rsclp->gp_seq[RCU_NEXT_READY_TAIL]); +- pr_info(" CB %d^%d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n", ++ pr_info(" CB %d^%d->%d %c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n", + rdp->cpu, rdp->nocb_gp_rdp->cpu, + nocb_next_rdp ? nocb_next_rdp->cpu : -1, + "kK"[!!rdp->nocb_cb_kthread], + "bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)], +- "cC"[!!atomic_read(&rdp->nocb_lock_contended)], + "lL"[raw_spin_is_locked(&rdp->nocb_lock)], + "sS"[!!rdp->nocb_cb_sleep], + ".W"[swait_active(&rdp->nocb_cb_wq)], +diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c +index e3268615a65a1..233d9d0437c27 100644 +--- a/net/ipv4/fib_semantics.c ++++ b/net/ipv4/fib_semantics.c +@@ -1030,7 +1030,7 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi) + bool ecn_ca = false; + + nla_strscpy(tmp, nla, sizeof(tmp)); +- val = tcp_ca_get_key_by_name(fi->fib_net, tmp, &ecn_ca); ++ val = tcp_ca_get_key_by_name(tmp, &ecn_ca); + } else { + if (nla_len(nla) != sizeof(u32)) + return false; +@@ -1459,8 +1459,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg, + fi = kzalloc(struct_size(fi, fib_nh, nhs), GFP_KERNEL); + if (!fi) + goto failure; +- fi->fib_metrics = ip_fib_metrics_init(fi->fib_net, cfg->fc_mx, +- cfg->fc_mx_len, extack); ++ fi->fib_metrics = ip_fib_metrics_init(cfg->fc_mx, cfg->fc_mx_len, extack); + if (IS_ERR(fi->fib_metrics)) { + err = PTR_ERR(fi->fib_metrics); + kfree(fi); +diff --git a/net/ipv4/metrics.c b/net/ipv4/metrics.c +index 0e3ee1532848c..8ddac1f595ed8 100644 +--- a/net/ipv4/metrics.c ++++ b/net/ipv4/metrics.c +@@ -7,7 +7,7 @@ + #include + #include + +-static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx, ++static int ip_metrics_convert(struct nlattr *fc_mx, + int fc_mx_len, u32 *metrics, + struct netlink_ext_ack *extack) + { +@@ -31,7 +31,7 @@ static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx, + char tmp[TCP_CA_NAME_MAX]; + + nla_strscpy(tmp, nla, sizeof(tmp)); +- val = tcp_ca_get_key_by_name(net, tmp, &ecn_ca); ++ val = tcp_ca_get_key_by_name(tmp, &ecn_ca); + if (val == TCP_CA_UNSPEC) { + NL_SET_ERR_MSG(extack, "Unknown tcp congestion algorithm"); + return -EINVAL; +@@ -63,7 +63,7 @@ static int 
ip_metrics_convert(struct net *net, struct nlattr *fc_mx, + return 0; + } + +-struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx, ++struct dst_metrics *ip_fib_metrics_init(struct nlattr *fc_mx, + int fc_mx_len, + struct netlink_ext_ack *extack) + { +@@ -77,7 +77,7 @@ struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx, + if (unlikely(!fib_metrics)) + return ERR_PTR(-ENOMEM); + +- err = ip_metrics_convert(net, fc_mx, fc_mx_len, fib_metrics->metrics, ++ err = ip_metrics_convert(fc_mx, fc_mx_len, fib_metrics->metrics, + extack); + if (!err) { + refcount_set(&fib_metrics->refcnt, 1); +diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c +index 1b34050a7538b..95dbb2799be46 100644 +--- a/net/ipv4/tcp_cong.c ++++ b/net/ipv4/tcp_cong.c +@@ -46,8 +46,7 @@ void tcp_set_ca_state(struct sock *sk, const u8 ca_state) + } + + /* Must be called with rcu lock held */ +-static struct tcp_congestion_ops *tcp_ca_find_autoload(struct net *net, +- const char *name) ++static struct tcp_congestion_ops *tcp_ca_find_autoload(const char *name) + { + struct tcp_congestion_ops *ca = tcp_ca_find(name); + +@@ -182,7 +181,7 @@ int tcp_update_congestion_control(struct tcp_congestion_ops *ca, struct tcp_cong + return ret; + } + +-u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca) ++u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca) + { + const struct tcp_congestion_ops *ca; + u32 key = TCP_CA_UNSPEC; +@@ -190,7 +189,7 @@ u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca) + might_sleep(); + + rcu_read_lock(); +- ca = tcp_ca_find_autoload(net, name); ++ ca = tcp_ca_find_autoload(name); + if (ca) { + key = ca->key; + *ecn_ca = ca->flags & TCP_CONG_NEEDS_ECN; +@@ -287,7 +286,7 @@ int tcp_set_default_congestion_control(struct net *net, const char *name) + int ret; + + rcu_read_lock(); +- ca = tcp_ca_find_autoload(net, name); ++ ca = tcp_ca_find_autoload(name); + if (!ca) { + ret = -ENOENT; + } else if (!bpf_try_module_get(ca, ca->owner)) { +@@ -425,7 +424,7 @@ int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, + if (!load) + ca = tcp_ca_find(name); + else +- ca = tcp_ca_find_autoload(sock_net(sk), name); ++ ca = tcp_ca_find_autoload(name); + + /* No change asking for existing value */ + if (ca == icsk->icsk_ca_ops) { +diff --git a/net/ipv6/route.c b/net/ipv6/route.c +index 49ef5623c55e2..0299886dbeb91 100644 +--- a/net/ipv6/route.c ++++ b/net/ipv6/route.c +@@ -3754,7 +3754,7 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, + if (!rt) + goto out; + +- rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len, ++ rt->fib6_metrics = ip_fib_metrics_init(cfg->fc_mx, cfg->fc_mx_len, + extack); + if (IS_ERR(rt->fib6_metrics)) { + err = PTR_ERR(rt->fib6_metrics); +diff --git a/net/mac80211/main.c b/net/mac80211/main.c +index 066424e62ff09..71d60f57a886c 100644 +--- a/net/mac80211/main.c ++++ b/net/mac80211/main.c +@@ -215,6 +215,8 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, + + might_sleep(); + ++ WARN_ON_ONCE(ieee80211_vif_is_mld(&sdata->vif)); ++ + if (!changed || sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + return; + +@@ -247,7 +249,6 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, + if (changed & ~BSS_CHANGED_VIF_CFG_FLAGS) { + u64 ch = changed & ~BSS_CHANGED_VIF_CFG_FLAGS; + +- /* FIXME: should be for each link */ + trace_drv_link_info_changed(local, sdata, &sdata->vif.bss_conf, + changed); + 
if (local->ops->link_info_changed) +diff --git a/net/mptcp/fastopen.c b/net/mptcp/fastopen.c +index ad28da655f8bc..a29ff901df758 100644 +--- a/net/mptcp/fastopen.c ++++ b/net/mptcp/fastopen.c +@@ -68,12 +68,12 @@ void __mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflo + skb = skb_peek_tail(&sk->sk_receive_queue); + if (skb) { + WARN_ON_ONCE(MPTCP_SKB_CB(skb)->end_seq); +- pr_debug("msk %p moving seq %llx -> %llx end_seq %llx -> %llx", sk, ++ pr_debug("msk %p moving seq %llx -> %llx end_seq %llx -> %llx\n", sk, + MPTCP_SKB_CB(skb)->map_seq, MPTCP_SKB_CB(skb)->map_seq + msk->ack_seq, + MPTCP_SKB_CB(skb)->end_seq, MPTCP_SKB_CB(skb)->end_seq + msk->ack_seq); + MPTCP_SKB_CB(skb)->map_seq += msk->ack_seq; + MPTCP_SKB_CB(skb)->end_seq += msk->ack_seq; + } + +- pr_debug("msk=%p ack_seq=%llx", msk, msk->ack_seq); ++ pr_debug("msk=%p ack_seq=%llx\n", msk, msk->ack_seq); + } +diff --git a/net/mptcp/options.c b/net/mptcp/options.c +index 604724cca887f..2ad9006a157ae 100644 +--- a/net/mptcp/options.c ++++ b/net/mptcp/options.c +@@ -117,7 +117,7 @@ static void mptcp_parse_option(const struct sk_buff *skb, + mp_opt->suboptions |= OPTION_MPTCP_CSUMREQD; + ptr += 2; + } +- pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d csum=%u", ++ pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d csum=%u\n", + version, flags, opsize, mp_opt->sndr_key, + mp_opt->rcvr_key, mp_opt->data_len, mp_opt->csum); + break; +@@ -131,7 +131,7 @@ static void mptcp_parse_option(const struct sk_buff *skb, + ptr += 4; + mp_opt->nonce = get_unaligned_be32(ptr); + ptr += 4; +- pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u", ++ pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u\n", + mp_opt->backup, mp_opt->join_id, + mp_opt->token, mp_opt->nonce); + } else if (opsize == TCPOLEN_MPTCP_MPJ_SYNACK) { +@@ -142,19 +142,19 @@ static void mptcp_parse_option(const struct sk_buff *skb, + ptr += 8; + mp_opt->nonce = get_unaligned_be32(ptr); + ptr += 4; +- pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u", ++ pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u\n", + mp_opt->backup, mp_opt->join_id, + mp_opt->thmac, mp_opt->nonce); + } else if (opsize == TCPOLEN_MPTCP_MPJ_ACK) { + mp_opt->suboptions |= OPTION_MPTCP_MPJ_ACK; + ptr += 2; + memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN); +- pr_debug("MP_JOIN hmac"); ++ pr_debug("MP_JOIN hmac\n"); + } + break; + + case MPTCPOPT_DSS: +- pr_debug("DSS"); ++ pr_debug("DSS\n"); + ptr++; + + /* we must clear 'mpc_map' be able to detect MP_CAPABLE +@@ -169,7 +169,7 @@ static void mptcp_parse_option(const struct sk_buff *skb, + mp_opt->ack64 = (flags & MPTCP_DSS_ACK64) != 0; + mp_opt->use_ack = (flags & MPTCP_DSS_HAS_ACK); + +- pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d", ++ pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d\n", + mp_opt->data_fin, mp_opt->dsn64, + mp_opt->use_map, mp_opt->ack64, + mp_opt->use_ack); +@@ -207,7 +207,7 @@ static void mptcp_parse_option(const struct sk_buff *skb, + ptr += 4; + } + +- pr_debug("data_ack=%llu", mp_opt->data_ack); ++ pr_debug("data_ack=%llu\n", mp_opt->data_ack); + } + + if (mp_opt->use_map) { +@@ -231,7 +231,7 @@ static void mptcp_parse_option(const struct sk_buff *skb, + ptr += 2; + } + +- pr_debug("data_seq=%llu subflow_seq=%u data_len=%u csum=%d:%u", ++ pr_debug("data_seq=%llu subflow_seq=%u data_len=%u csum=%d:%u\n", + mp_opt->data_seq, mp_opt->subflow_seq, + mp_opt->data_len, !!(mp_opt->suboptions & OPTION_MPTCP_CSUMREQD), + 
mp_opt->csum); +@@ -293,7 +293,7 @@ static void mptcp_parse_option(const struct sk_buff *skb, + mp_opt->ahmac = get_unaligned_be64(ptr); + ptr += 8; + } +- pr_debug("ADD_ADDR%s: id=%d, ahmac=%llu, echo=%d, port=%d", ++ pr_debug("ADD_ADDR%s: id=%d, ahmac=%llu, echo=%d, port=%d\n", + (mp_opt->addr.family == AF_INET6) ? "6" : "", + mp_opt->addr.id, mp_opt->ahmac, mp_opt->echo, ntohs(mp_opt->addr.port)); + break; +@@ -309,7 +309,7 @@ static void mptcp_parse_option(const struct sk_buff *skb, + mp_opt->rm_list.nr = opsize - TCPOLEN_MPTCP_RM_ADDR_BASE; + for (i = 0; i < mp_opt->rm_list.nr; i++) + mp_opt->rm_list.ids[i] = *ptr++; +- pr_debug("RM_ADDR: rm_list_nr=%d", mp_opt->rm_list.nr); ++ pr_debug("RM_ADDR: rm_list_nr=%d\n", mp_opt->rm_list.nr); + break; + + case MPTCPOPT_MP_PRIO: +@@ -318,7 +318,7 @@ static void mptcp_parse_option(const struct sk_buff *skb, + + mp_opt->suboptions |= OPTION_MPTCP_PRIO; + mp_opt->backup = *ptr++ & MPTCP_PRIO_BKUP; +- pr_debug("MP_PRIO: prio=%d", mp_opt->backup); ++ pr_debug("MP_PRIO: prio=%d\n", mp_opt->backup); + break; + + case MPTCPOPT_MP_FASTCLOSE: +@@ -329,7 +329,7 @@ static void mptcp_parse_option(const struct sk_buff *skb, + mp_opt->rcvr_key = get_unaligned_be64(ptr); + ptr += 8; + mp_opt->suboptions |= OPTION_MPTCP_FASTCLOSE; +- pr_debug("MP_FASTCLOSE: recv_key=%llu", mp_opt->rcvr_key); ++ pr_debug("MP_FASTCLOSE: recv_key=%llu\n", mp_opt->rcvr_key); + break; + + case MPTCPOPT_RST: +@@ -343,7 +343,7 @@ static void mptcp_parse_option(const struct sk_buff *skb, + flags = *ptr++; + mp_opt->reset_transient = flags & MPTCP_RST_TRANSIENT; + mp_opt->reset_reason = *ptr; +- pr_debug("MP_RST: transient=%u reason=%u", ++ pr_debug("MP_RST: transient=%u reason=%u\n", + mp_opt->reset_transient, mp_opt->reset_reason); + break; + +@@ -354,7 +354,7 @@ static void mptcp_parse_option(const struct sk_buff *skb, + ptr += 2; + mp_opt->suboptions |= OPTION_MPTCP_FAIL; + mp_opt->fail_seq = get_unaligned_be64(ptr); +- pr_debug("MP_FAIL: data_seq=%llu", mp_opt->fail_seq); ++ pr_debug("MP_FAIL: data_seq=%llu\n", mp_opt->fail_seq); + break; + + default: +@@ -417,7 +417,7 @@ bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb, + *size = TCPOLEN_MPTCP_MPC_SYN; + return true; + } else if (subflow->request_join) { +- pr_debug("remote_token=%u, nonce=%u", subflow->remote_token, ++ pr_debug("remote_token=%u, nonce=%u\n", subflow->remote_token, + subflow->local_nonce); + opts->suboptions = OPTION_MPTCP_MPJ_SYN; + opts->join_id = subflow->local_id; +@@ -500,7 +500,7 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb, + *size = TCPOLEN_MPTCP_MPC_ACK; + } + +- pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d", ++ pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d\n", + subflow, subflow->local_key, subflow->remote_key, + data_len); + +@@ -509,7 +509,7 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb, + opts->suboptions = OPTION_MPTCP_MPJ_ACK; + memcpy(opts->hmac, subflow->hmac, MPTCPOPT_HMAC_LEN); + *size = TCPOLEN_MPTCP_MPJ_ACK; +- pr_debug("subflow=%p", subflow); ++ pr_debug("subflow=%p\n", subflow); + + /* we can use the full delegate action helper only from BH context + * If we are in process context - sk is flushing the backlog at +@@ -675,7 +675,7 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff * + + *size = len; + if (drop_other_suboptions) { +- pr_debug("drop other suboptions"); ++ pr_debug("drop other suboptions\n"); + opts->suboptions = 0; + + /* 
note that e.g. DSS could have written into the memory +@@ -695,7 +695,7 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff * + } else { + MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ECHOADDTX); + } +- pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d", ++ pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d\n", + opts->addr.id, opts->ahmac, echo, ntohs(opts->addr.port)); + + return true; +@@ -726,7 +726,7 @@ static bool mptcp_established_options_rm_addr(struct sock *sk, + opts->rm_list = rm_list; + + for (i = 0; i < opts->rm_list.nr; i++) +- pr_debug("rm_list_ids[%d]=%d", i, opts->rm_list.ids[i]); ++ pr_debug("rm_list_ids[%d]=%d\n", i, opts->rm_list.ids[i]); + MPTCP_ADD_STATS(sock_net(sk), MPTCP_MIB_RMADDRTX, opts->rm_list.nr); + return true; + } +@@ -752,7 +752,7 @@ static bool mptcp_established_options_mp_prio(struct sock *sk, + opts->suboptions |= OPTION_MPTCP_PRIO; + opts->backup = subflow->request_bkup; + +- pr_debug("prio=%d", opts->backup); ++ pr_debug("prio=%d\n", opts->backup); + + return true; + } +@@ -794,7 +794,7 @@ static bool mptcp_established_options_fastclose(struct sock *sk, + opts->suboptions |= OPTION_MPTCP_FASTCLOSE; + opts->rcvr_key = msk->remote_key; + +- pr_debug("FASTCLOSE key=%llu", opts->rcvr_key); ++ pr_debug("FASTCLOSE key=%llu\n", opts->rcvr_key); + MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFASTCLOSETX); + return true; + } +@@ -816,7 +816,7 @@ static bool mptcp_established_options_mp_fail(struct sock *sk, + opts->suboptions |= OPTION_MPTCP_FAIL; + opts->fail_seq = subflow->map_seq; + +- pr_debug("MP_FAIL fail_seq=%llu", opts->fail_seq); ++ pr_debug("MP_FAIL fail_seq=%llu\n", opts->fail_seq); + MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFAILTX); + + return true; +@@ -904,7 +904,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size, + opts->csum_reqd = subflow_req->csum_reqd; + opts->allow_join_id0 = subflow_req->allow_join_id0; + *size = TCPOLEN_MPTCP_MPC_SYNACK; +- pr_debug("subflow_req=%p, local_key=%llu", ++ pr_debug("subflow_req=%p, local_key=%llu\n", + subflow_req, subflow_req->local_key); + return true; + } else if (subflow_req->mp_join) { +@@ -913,7 +913,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size, + opts->join_id = subflow_req->local_id; + opts->thmac = subflow_req->thmac; + opts->nonce = subflow_req->local_nonce; +- pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u", ++ pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u\n", + subflow_req, opts->backup, opts->join_id, + opts->thmac, opts->nonce); + *size = TCPOLEN_MPTCP_MPJ_SYNACK; +diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c +index c42496bcb5e09..157a574fab0cc 100644 +--- a/net/mptcp/pm.c ++++ b/net/mptcp/pm.c +@@ -20,7 +20,7 @@ int mptcp_pm_announce_addr(struct mptcp_sock *msk, + { + u8 add_addr = READ_ONCE(msk->pm.addr_signal); + +- pr_debug("msk=%p, local_id=%d, echo=%d", msk, addr->id, echo); ++ pr_debug("msk=%p, local_id=%d, echo=%d\n", msk, addr->id, echo); + + lockdep_assert_held(&msk->pm.lock); + +@@ -46,7 +46,7 @@ int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_ + { + u8 rm_addr = READ_ONCE(msk->pm.addr_signal); + +- pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr); ++ pr_debug("msk=%p, rm_list_nr=%d\n", msk, rm_list->nr); + + if (rm_addr) { + MPTCP_ADD_STATS(sock_net((struct sock *)msk), +@@ -67,7 +67,7 @@ void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int + { + struct mptcp_pm_data *pm = &msk->pm; + +- pr_debug("msk=%p, token=%u 
side=%d", msk, msk->token, server_side); ++ pr_debug("msk=%p, token=%u side=%d\n", msk, msk->token, server_side); + + WRITE_ONCE(pm->server_side, server_side); + mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC); +@@ -91,7 +91,7 @@ bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk) + + subflows_max = mptcp_pm_get_subflows_max(msk); + +- pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows, ++ pr_debug("msk=%p subflows=%d max=%d allow=%d\n", msk, pm->subflows, + subflows_max, READ_ONCE(pm->accept_subflow)); + + /* try to avoid acquiring the lock below */ +@@ -115,7 +115,7 @@ bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk) + static bool mptcp_pm_schedule_work(struct mptcp_sock *msk, + enum mptcp_pm_status new_status) + { +- pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status, ++ pr_debug("msk=%p status=%x new=%lx\n", msk, msk->pm.status, + BIT(new_status)); + if (msk->pm.status & BIT(new_status)) + return false; +@@ -130,7 +130,7 @@ void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk) + struct mptcp_pm_data *pm = &msk->pm; + bool announce = false; + +- pr_debug("msk=%p", msk); ++ pr_debug("msk=%p\n", msk); + + spin_lock_bh(&pm->lock); + +@@ -154,14 +154,14 @@ void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk) + + void mptcp_pm_connection_closed(struct mptcp_sock *msk) + { +- pr_debug("msk=%p", msk); ++ pr_debug("msk=%p\n", msk); + } + + void mptcp_pm_subflow_established(struct mptcp_sock *msk) + { + struct mptcp_pm_data *pm = &msk->pm; + +- pr_debug("msk=%p", msk); ++ pr_debug("msk=%p\n", msk); + + if (!READ_ONCE(pm->work_pending)) + return; +@@ -213,7 +213,7 @@ void mptcp_pm_add_addr_received(const struct sock *ssk, + struct mptcp_sock *msk = mptcp_sk(subflow->conn); + struct mptcp_pm_data *pm = &msk->pm; + +- pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id, ++ pr_debug("msk=%p remote_id=%d accept=%d\n", msk, addr->id, + READ_ONCE(pm->accept_addr)); + + mptcp_event_addr_announced(ssk, addr); +@@ -246,7 +246,7 @@ void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk, + { + struct mptcp_pm_data *pm = &msk->pm; + +- pr_debug("msk=%p", msk); ++ pr_debug("msk=%p\n", msk); + + spin_lock_bh(&pm->lock); + +@@ -270,7 +270,7 @@ void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, + struct mptcp_pm_data *pm = &msk->pm; + u8 i; + +- pr_debug("msk=%p remote_ids_nr=%d", msk, rm_list->nr); ++ pr_debug("msk=%p remote_ids_nr=%d\n", msk, rm_list->nr); + + for (i = 0; i < rm_list->nr; i++) + mptcp_event_addr_removed(msk, rm_list->ids[i]); +@@ -302,19 +302,19 @@ void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq) + struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); + struct mptcp_sock *msk = mptcp_sk(subflow->conn); + +- pr_debug("fail_seq=%llu", fail_seq); ++ pr_debug("fail_seq=%llu\n", fail_seq); + + if (!READ_ONCE(msk->allow_infinite_fallback)) + return; + + if (!subflow->fail_tout) { +- pr_debug("send MP_FAIL response and infinite map"); ++ pr_debug("send MP_FAIL response and infinite map\n"); + + subflow->send_mp_fail = 1; + subflow->send_infinite_map = 1; + tcp_send_ack(sk); + } else { +- pr_debug("MP_FAIL response received"); ++ pr_debug("MP_FAIL response received\n"); + WRITE_ONCE(subflow->fail_tout, 0); + } + } +diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c +index 7dd10bacc8d28..d5902e7f47a78 100644 +--- a/net/mptcp/pm_netlink.c ++++ b/net/mptcp/pm_netlink.c +@@ -295,7 +295,7 @@ static void mptcp_pm_add_timer(struct timer_list *timer) + struct mptcp_sock *msk = 
entry->sock; + struct sock *sk = (struct sock *)msk; + +- pr_debug("msk=%p", msk); ++ pr_debug("msk=%p\n", msk); + + if (!msk) + return; +@@ -314,7 +314,7 @@ static void mptcp_pm_add_timer(struct timer_list *timer) + spin_lock_bh(&msk->pm.lock); + + if (!mptcp_pm_should_add_signal_addr(msk)) { +- pr_debug("retransmit ADD_ADDR id=%d", entry->addr.id); ++ pr_debug("retransmit ADD_ADDR id=%d\n", entry->addr.id); + mptcp_pm_announce_addr(msk, &entry->addr, false); + mptcp_pm_add_addr_send_ack(msk); + entry->retrans_times++; +@@ -395,7 +395,7 @@ void mptcp_pm_free_anno_list(struct mptcp_sock *msk) + struct sock *sk = (struct sock *)msk; + LIST_HEAD(free_list); + +- pr_debug("msk=%p", msk); ++ pr_debug("msk=%p\n", msk); + + spin_lock_bh(&msk->pm.lock); + list_splice_init(&msk->pm.anno_list, &free_list); +@@ -481,7 +481,7 @@ static void __mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_con + struct sock *ssk = mptcp_subflow_tcp_sock(subflow); + bool slow; + +- pr_debug("send ack for %s", ++ pr_debug("send ack for %s\n", + prio ? "mp_prio" : (mptcp_pm_should_add_signal(msk) ? "add_addr" : "rm_addr")); + + slow = lock_sock_fast(ssk); +@@ -730,7 +730,7 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk) + add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk); + subflows_max = mptcp_pm_get_subflows_max(msk); + +- pr_debug("accepted %d:%d remote family %d", ++ pr_debug("accepted %d:%d remote family %d\n", + msk->pm.add_addr_accepted, add_addr_accept_max, + msk->pm.remote.family); + +@@ -803,7 +803,7 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk, + { + struct mptcp_subflow_context *subflow; + +- pr_debug("bkup=%d", bkup); ++ pr_debug("bkup=%d\n", bkup); + + mptcp_for_each_subflow(msk, subflow) { + struct sock *ssk = mptcp_subflow_tcp_sock(subflow); +@@ -826,11 +826,6 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk, + return -EINVAL; + } + +-static bool mptcp_local_id_match(const struct mptcp_sock *msk, u8 local_id, u8 id) +-{ +- return local_id == id || (!local_id && msk->mpc_endpoint_id == id); +-} +- + static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk, + const struct mptcp_rm_list *rm_list, + enum linux_mptcp_mib_field rm_type) +@@ -839,7 +834,7 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk, + struct sock *sk = (struct sock *)msk; + u8 i; + +- pr_debug("%s rm_list_nr %d", ++ pr_debug("%s rm_list_nr %d\n", + rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow", rm_list->nr); + + msk_owned_by_me(msk); +@@ -867,10 +862,10 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk, + continue; + if (rm_type == MPTCP_MIB_RMADDR && remote_id != rm_id) + continue; +- if (rm_type == MPTCP_MIB_RMSUBFLOW && !mptcp_local_id_match(msk, id, rm_id)) ++ if (rm_type == MPTCP_MIB_RMSUBFLOW && id != rm_id) + continue; + +- pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u", ++ pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u\n", + rm_type == MPTCP_MIB_RMADDR ? 
"address" : "subflow", + i, rm_id, id, remote_id, msk->mpc_endpoint_id); + spin_unlock_bh(&msk->pm.lock); +@@ -927,7 +922,7 @@ void mptcp_pm_nl_work(struct mptcp_sock *msk) + + spin_lock_bh(&msk->pm.lock); + +- pr_debug("msk=%p status=%x", msk, pm->status); ++ pr_debug("msk=%p status=%x\n", msk, pm->status); + if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) { + pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED); + mptcp_pm_nl_add_addr_received(msk); +@@ -1506,6 +1501,12 @@ static bool remove_anno_list_by_saddr(struct mptcp_sock *msk, + return false; + } + ++static u8 mptcp_endp_get_local_id(struct mptcp_sock *msk, ++ const struct mptcp_addr_info *addr) ++{ ++ return msk->mpc_endpoint_id == addr->id ? 0 : addr->id; ++} ++ + static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk, + const struct mptcp_addr_info *addr, + bool force) +@@ -1513,7 +1514,7 @@ static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk, + struct mptcp_rm_list list = { .nr = 0 }; + bool ret; + +- list.ids[list.nr++] = addr->id; ++ list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr); + + ret = remove_anno_list_by_saddr(msk, addr); + if (ret || force) { +@@ -1540,13 +1541,11 @@ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net, + const struct mptcp_pm_addr_entry *entry) + { + const struct mptcp_addr_info *addr = &entry->addr; +- struct mptcp_rm_list list = { .nr = 0 }; ++ struct mptcp_rm_list list = { .nr = 1 }; + long s_slot = 0, s_num = 0; + struct mptcp_sock *msk; + +- pr_debug("remove_id=%d", addr->id); +- +- list.ids[list.nr++] = addr->id; ++ pr_debug("remove_id=%d\n", addr->id); + + while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { + struct sock *sk = (struct sock *)msk; +@@ -1565,6 +1564,7 @@ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net, + mptcp_pm_remove_anno_addr(msk, addr, remove_subflow && + !(entry->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT)); + ++ list.ids[0] = mptcp_endp_get_local_id(msk, addr); + if (remove_subflow) { + spin_lock_bh(&msk->pm.lock); + mptcp_pm_nl_rm_subflow_received(msk, &list); +@@ -1673,6 +1673,7 @@ static int mptcp_nl_cmd_del_addr(struct sk_buff *skb, struct genl_info *info) + return ret; + } + ++/* Called from the userspace PM only */ + void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list) + { + struct mptcp_rm_list alist = { .nr = 0 }; +@@ -1701,8 +1702,9 @@ void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list) + } + } + +-void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk, +- struct list_head *rm_list) ++/* Called from the in-kernel PM only */ ++static void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk, ++ struct list_head *rm_list) + { + struct mptcp_rm_list alist = { .nr = 0 }, slist = { .nr = 0 }; + struct mptcp_pm_addr_entry *entry; +@@ -1710,11 +1712,11 @@ void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk, + list_for_each_entry(entry, rm_list, list) { + if (slist.nr < MPTCP_RM_IDS_MAX && + lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) +- slist.ids[slist.nr++] = entry->addr.id; ++ slist.ids[slist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr); + + if (alist.nr < MPTCP_RM_IDS_MAX && + remove_anno_list_by_saddr(msk, &entry->addr)) +- alist.ids[alist.nr++] = entry->addr.id; ++ alist.ids[alist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr); + } + + spin_lock_bh(&msk->pm.lock); +@@ -2002,7 +2004,7 @@ static void mptcp_pm_nl_fullmesh(struct mptcp_sock *msk, + { + struct mptcp_rm_list list = { .nr = 0 }; + +- 
list.ids[list.nr++] = addr->id; ++ list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr); + + spin_lock_bh(&msk->pm.lock); + mptcp_pm_nl_rm_subflow_received(msk, &list); +diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c +index 0ae684624c820..ba6248372aee7 100644 +--- a/net/mptcp/protocol.c ++++ b/net/mptcp/protocol.c +@@ -141,7 +141,7 @@ static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to, + !skb_try_coalesce(to, from, &fragstolen, &delta)) + return false; + +- pr_debug("colesced seq %llx into %llx new len %d new end seq %llx", ++ pr_debug("colesced seq %llx into %llx new len %d new end seq %llx\n", + MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq, + to->len, MPTCP_SKB_CB(from)->end_seq); + MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq; +@@ -219,7 +219,7 @@ static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb) + end_seq = MPTCP_SKB_CB(skb)->end_seq; + max_seq = atomic64_read(&msk->rcv_wnd_sent); + +- pr_debug("msk=%p seq=%llx limit=%llx empty=%d", msk, seq, max_seq, ++ pr_debug("msk=%p seq=%llx limit=%llx empty=%d\n", msk, seq, max_seq, + RB_EMPTY_ROOT(&msk->out_of_order_queue)); + if (after64(end_seq, max_seq)) { + /* out of window */ +@@ -643,7 +643,7 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk, + } + } + +- pr_debug("msk=%p ssk=%p", msk, ssk); ++ pr_debug("msk=%p ssk=%p\n", msk, ssk); + tp = tcp_sk(ssk); + do { + u32 map_remaining, offset; +@@ -722,7 +722,7 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk) + u64 end_seq; + + p = rb_first(&msk->out_of_order_queue); +- pr_debug("msk=%p empty=%d", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue)); ++ pr_debug("msk=%p empty=%d\n", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue)); + while (p) { + skb = rb_to_skb(p); + if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) +@@ -744,7 +744,7 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk) + int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq; + + /* skip overlapping data, if any */ +- pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d", ++ pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d\n", + MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq, + delta); + MPTCP_SKB_CB(skb)->offset += delta; +@@ -1235,7 +1235,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk, + size_t copy; + int i; + +- pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u", ++ pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u\n", + msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent); + + if (WARN_ON_ONCE(info->sent > info->limit || +@@ -1336,7 +1336,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk, + mpext->use_map = 1; + mpext->dsn64 = 1; + +- pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d", ++ pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d\n", + mpext->data_seq, mpext->subflow_seq, mpext->data_len, + mpext->dsn64); + +@@ -1855,7 +1855,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) + if (!msk->first_pending) + WRITE_ONCE(msk->first_pending, dfrag); + } +- pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d", msk, ++ pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk, + dfrag->data_seq, dfrag->data_len, dfrag->already_sent, + !dfrag_collapsed); + +@@ -2211,7 +2211,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, + } + } + +- pr_debug("block timeout %ld", timeo); ++ pr_debug("block timeout %ld\n", timeo); + sk_wait_data(sk, &timeo, NULL); 
+ } + +@@ -2227,7 +2227,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, + } + } + +- pr_debug("msk=%p rx queue empty=%d:%d copied=%d", ++ pr_debug("msk=%p rx queue empty=%d:%d copied=%d\n", + msk, skb_queue_empty_lockless(&sk->sk_receive_queue), + skb_queue_empty(&msk->receive_queue), copied); + if (!(flags & MSG_PEEK)) +@@ -2471,6 +2471,12 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk, + void mptcp_close_ssk(struct sock *sk, struct sock *ssk, + struct mptcp_subflow_context *subflow) + { ++ /* The first subflow can already be closed and still in the list */ ++ if (subflow->close_event_done) ++ return; ++ ++ subflow->close_event_done = true; ++ + if (sk->sk_state == TCP_ESTABLISHED) + mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL); + +@@ -2680,7 +2686,7 @@ static void mptcp_mp_fail_no_response(struct mptcp_sock *msk) + if (!ssk) + return; + +- pr_debug("MP_FAIL doesn't respond, reset the subflow"); ++ pr_debug("MP_FAIL doesn't respond, reset the subflow\n"); + + slow = lock_sock_fast(ssk); + mptcp_subflow_reset(ssk); +@@ -2850,7 +2856,7 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how) + break; + default: + if (__mptcp_check_fallback(mptcp_sk(sk))) { +- pr_debug("Fallback"); ++ pr_debug("Fallback\n"); + ssk->sk_shutdown |= how; + tcp_shutdown(ssk, how); + +@@ -2860,7 +2866,7 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how) + WRITE_ONCE(mptcp_sk(sk)->snd_una, mptcp_sk(sk)->snd_nxt); + mptcp_schedule_work(sk); + } else { +- pr_debug("Sending DATA_FIN on subflow %p", ssk); ++ pr_debug("Sending DATA_FIN on subflow %p\n", ssk); + tcp_send_ack(ssk); + if (!mptcp_rtx_timer_pending(sk)) + mptcp_reset_rtx_timer(sk); +@@ -2926,7 +2932,7 @@ static void mptcp_check_send_data_fin(struct sock *sk) + struct mptcp_subflow_context *subflow; + struct mptcp_sock *msk = mptcp_sk(sk); + +- pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu", ++ pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu\n", + msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk), + msk->snd_nxt, msk->write_seq); + +@@ -2950,7 +2956,7 @@ static void __mptcp_wr_shutdown(struct sock *sk) + { + struct mptcp_sock *msk = mptcp_sk(sk); + +- pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d", ++ pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d\n", + msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state, + !!mptcp_send_head(sk)); + +@@ -2965,7 +2971,7 @@ static void __mptcp_destroy_sock(struct sock *sk) + { + struct mptcp_sock *msk = mptcp_sk(sk); + +- pr_debug("msk=%p", msk); ++ pr_debug("msk=%p\n", msk); + + might_sleep(); + +@@ -3073,7 +3079,7 @@ bool __mptcp_close(struct sock *sk, long timeout) + mptcp_set_state(sk, TCP_CLOSE); + + sock_hold(sk); +- pr_debug("msk=%p state=%d", sk, sk->sk_state); ++ pr_debug("msk=%p state=%d\n", sk, sk->sk_state); + if (msk->token) + mptcp_event(MPTCP_EVENT_CLOSED, msk, NULL, GFP_KERNEL); + +@@ -3508,7 +3514,7 @@ static int mptcp_get_port(struct sock *sk, unsigned short snum) + { + struct mptcp_sock *msk = mptcp_sk(sk); + +- pr_debug("msk=%p, ssk=%p", msk, msk->first); ++ pr_debug("msk=%p, ssk=%p\n", msk, msk->first); + if (WARN_ON_ONCE(!msk->first)) + return -EINVAL; + +@@ -3525,7 +3531,7 @@ void mptcp_finish_connect(struct sock *ssk) + sk = subflow->conn; + msk = mptcp_sk(sk); + +- pr_debug("msk=%p, token=%u", sk, subflow->token); ++ pr_debug("msk=%p, token=%u\n", sk, 
subflow->token); + + subflow->map_seq = subflow->iasn; + subflow->map_subflow_seq = 1; +@@ -3554,7 +3560,7 @@ bool mptcp_finish_join(struct sock *ssk) + struct sock *parent = (void *)msk; + bool ret = true; + +- pr_debug("msk=%p, subflow=%p", msk, subflow); ++ pr_debug("msk=%p, subflow=%p\n", msk, subflow); + + /* mptcp socket already closing? */ + if (!mptcp_is_fully_established(parent)) { +@@ -3600,7 +3606,7 @@ bool mptcp_finish_join(struct sock *ssk) + + static void mptcp_shutdown(struct sock *sk, int how) + { +- pr_debug("sk=%p, how=%d", sk, how); ++ pr_debug("sk=%p, how=%d\n", sk, how); + + if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk)) + __mptcp_wr_shutdown(sk); +@@ -3820,7 +3826,7 @@ static int mptcp_listen(struct socket *sock, int backlog) + struct sock *ssk; + int err; + +- pr_debug("msk=%p", msk); ++ pr_debug("msk=%p\n", msk); + + lock_sock(sk); + +@@ -3860,7 +3866,7 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock, + struct sock *ssk, *newsk; + int err; + +- pr_debug("msk=%p", msk); ++ pr_debug("msk=%p\n", msk); + + /* Buggy applications can call accept on socket states other then LISTEN + * but no need to allocate the first subflow just to error out. +@@ -3869,12 +3875,12 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock, + if (!ssk) + return -EINVAL; + +- pr_debug("ssk=%p, listener=%p", ssk, mptcp_subflow_ctx(ssk)); ++ pr_debug("ssk=%p, listener=%p\n", ssk, mptcp_subflow_ctx(ssk)); + newsk = inet_csk_accept(ssk, flags, &err, kern); + if (!newsk) + return err; + +- pr_debug("newsk=%p, subflow is mptcp=%d", newsk, sk_is_mptcp(newsk)); ++ pr_debug("newsk=%p, subflow is mptcp=%d\n", newsk, sk_is_mptcp(newsk)); + if (sk_is_mptcp(newsk)) { + struct mptcp_subflow_context *subflow; + struct sock *new_mptcp_sock; +@@ -3967,7 +3973,7 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock, + sock_poll_wait(file, sock, wait); + + state = inet_sk_state_load(sk); +- pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags); ++ pr_debug("msk=%p state=%d flags=%lx\n", msk, state, msk->flags); + if (state == TCP_LISTEN) { + struct sock *ssk = READ_ONCE(msk->first); + +diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h +index 20736f31dc534..b9a4f6364b785 100644 +--- a/net/mptcp/protocol.h ++++ b/net/mptcp/protocol.h +@@ -500,7 +500,8 @@ struct mptcp_subflow_context { + stale : 1, /* unable to snd/rcv data, do not use for xmit */ + valid_csum_seen : 1, /* at least one csum validated */ + is_mptfo : 1, /* subflow is doing TFO */ +- __unused : 10; ++ close_event_done : 1, /* has done the post-closed part */ ++ __unused : 9; + enum mptcp_data_avail data_avail; + bool scheduled; + u32 remote_nonce; +@@ -948,8 +949,6 @@ int mptcp_pm_announce_addr(struct mptcp_sock *msk, + bool echo); + int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list); + void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list); +-void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk, +- struct list_head *rm_list); + + void mptcp_free_local_addr_list(struct mptcp_sock *msk); + int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info); +@@ -1093,7 +1092,7 @@ static inline bool mptcp_check_fallback(const struct sock *sk) + static inline void __mptcp_do_fallback(struct mptcp_sock *msk) + { + if (test_bit(MPTCP_FALLBACK_DONE, &msk->flags)) { +- pr_debug("TCP fallback already done (msk=%p)", msk); ++ pr_debug("TCP fallback already done (msk=%p)\n", msk); + return; + } + 
set_bit(MPTCP_FALLBACK_DONE, &msk->flags); +@@ -1120,7 +1119,7 @@ static inline void mptcp_do_fallback(struct sock *ssk) + } + } + +-#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)", __func__, a) ++#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)\n", __func__, a) + + static inline bool mptcp_check_infinite_map(struct sk_buff *skb) + { +diff --git a/net/mptcp/sched.c b/net/mptcp/sched.c +index 4ab0693c069c0..907986f5f0427 100644 +--- a/net/mptcp/sched.c ++++ b/net/mptcp/sched.c +@@ -64,7 +64,7 @@ int mptcp_register_scheduler(struct mptcp_sched_ops *sched) + list_add_tail_rcu(&sched->list, &mptcp_sched_list); + spin_unlock(&mptcp_sched_list_lock); + +- pr_debug("%s registered", sched->name); ++ pr_debug("%s registered\n", sched->name); + return 0; + } + +@@ -96,7 +96,7 @@ int mptcp_init_sched(struct mptcp_sock *msk, + if (msk->sched->init) + msk->sched->init(msk); + +- pr_debug("sched=%s", msk->sched->name); ++ pr_debug("sched=%s\n", msk->sched->name); + + return 0; + } +diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c +index bdfeecea814f3..d0f73b9180c7c 100644 +--- a/net/mptcp/sockopt.c ++++ b/net/mptcp/sockopt.c +@@ -858,7 +858,7 @@ int mptcp_setsockopt(struct sock *sk, int level, int optname, + struct mptcp_sock *msk = mptcp_sk(sk); + struct sock *ssk; + +- pr_debug("msk=%p", msk); ++ pr_debug("msk=%p\n", msk); + + if (level == SOL_SOCKET) + return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen); +@@ -1416,7 +1416,7 @@ int mptcp_getsockopt(struct sock *sk, int level, int optname, + struct mptcp_sock *msk = mptcp_sk(sk); + struct sock *ssk; + +- pr_debug("msk=%p", msk); ++ pr_debug("msk=%p\n", msk); + + /* @@ the meaning of setsockopt() when the socket is connected and + * there are multiple subflows is not yet defined. 
It is up to the +diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c +index 81050aa55f7b9..2c779275a0310 100644 +--- a/net/mptcp/subflow.c ++++ b/net/mptcp/subflow.c +@@ -40,7 +40,7 @@ static void subflow_req_destructor(struct request_sock *req) + { + struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); + +- pr_debug("subflow_req=%p", subflow_req); ++ pr_debug("subflow_req=%p\n", subflow_req); + + if (subflow_req->msk) + sock_put((struct sock *)subflow_req->msk); +@@ -146,7 +146,7 @@ static int subflow_check_req(struct request_sock *req, + struct mptcp_options_received mp_opt; + bool opt_mp_capable, opt_mp_join; + +- pr_debug("subflow_req=%p, listener=%p", subflow_req, listener); ++ pr_debug("subflow_req=%p, listener=%p\n", subflow_req, listener); + + #ifdef CONFIG_TCP_MD5SIG + /* no MPTCP if MD5SIG is enabled on this socket or we may run out of +@@ -219,7 +219,7 @@ static int subflow_check_req(struct request_sock *req, + } + + if (subflow_use_different_sport(subflow_req->msk, sk_listener)) { +- pr_debug("syn inet_sport=%d %d", ++ pr_debug("syn inet_sport=%d %d\n", + ntohs(inet_sk(sk_listener)->inet_sport), + ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport)); + if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) { +@@ -238,7 +238,7 @@ static int subflow_check_req(struct request_sock *req, + return -EPERM; + } + +- pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token, ++ pr_debug("token=%u, remote_nonce=%u msk=%p\n", subflow_req->token, + subflow_req->remote_nonce, subflow_req->msk); + } + +@@ -508,7 +508,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb) + subflow->rel_write_seq = 1; + subflow->conn_finished = 1; + subflow->ssn_offset = TCP_SKB_CB(skb)->seq; +- pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset); ++ pr_debug("subflow=%p synack seq=%x\n", subflow, subflow->ssn_offset); + + mptcp_get_options(skb, &mp_opt); + if (subflow->request_mptcp) { +@@ -540,7 +540,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb) + subflow->thmac = mp_opt.thmac; + subflow->remote_nonce = mp_opt.nonce; + WRITE_ONCE(subflow->remote_id, mp_opt.join_id); +- pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d", ++ pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d\n", + subflow, subflow->thmac, subflow->remote_nonce, + subflow->backup); + +@@ -566,7 +566,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb) + MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKBACKUPRX); + + if (subflow_use_different_dport(msk, sk)) { +- pr_debug("synack inet_dport=%d %d", ++ pr_debug("synack inet_dport=%d %d\n", + ntohs(inet_sk(sk)->inet_dport), + ntohs(inet_sk(parent)->inet_dport)); + MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX); +@@ -636,7 +636,7 @@ static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb) + { + struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); + +- pr_debug("subflow=%p", subflow); ++ pr_debug("subflow=%p\n", subflow); + + /* Never answer to SYNs sent to broadcast or multicast */ + if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) +@@ -667,7 +667,7 @@ static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb) + { + struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); + +- pr_debug("subflow=%p", subflow); ++ pr_debug("subflow=%p\n", subflow); + + if (skb->protocol == htons(ETH_P_IP)) + return subflow_v4_conn_request(sk, skb); +@@ -786,7 +786,7 
@@ static struct sock *subflow_syn_recv_sock(const struct sock *sk, + struct mptcp_sock *owner; + struct sock *child; + +- pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn); ++ pr_debug("listener=%p, req=%p, conn=%p\n", listener, req, listener->conn); + + /* After child creation we must look for MPC even when options + * are not parsed +@@ -877,7 +877,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk, + ctx->conn = (struct sock *)owner; + + if (subflow_use_different_sport(owner, sk)) { +- pr_debug("ack inet_sport=%d %d", ++ pr_debug("ack inet_sport=%d %d\n", + ntohs(inet_sk(sk)->inet_sport), + ntohs(inet_sk((struct sock *)owner)->inet_sport)); + if (!mptcp_pm_sport_in_anno_list(owner, sk)) { +@@ -934,7 +934,7 @@ enum mapping_status { + + static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn) + { +- pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d", ++ pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d\n", + ssn, subflow->map_subflow_seq, subflow->map_data_len); + } + +@@ -1094,7 +1094,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk, + + data_len = mpext->data_len; + if (data_len == 0) { +- pr_debug("infinite mapping received"); ++ pr_debug("infinite mapping received\n"); + MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX); + subflow->map_data_len = 0; + return MAPPING_INVALID; +@@ -1104,7 +1104,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk, + if (data_len == 1) { + bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq, + mpext->dsn64); +- pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq); ++ pr_debug("DATA_FIN with no payload seq=%llu\n", mpext->data_seq); + if (subflow->map_valid) { + /* A DATA_FIN might arrive in a DSS + * option before the previous mapping +@@ -1129,7 +1129,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk, + data_fin_seq &= GENMASK_ULL(31, 0); + + mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64); +- pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d", ++ pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d\n", + data_fin_seq, mpext->dsn64); + } + +@@ -1176,7 +1176,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk, + if (unlikely(subflow->map_csum_reqd != csum_reqd)) + return MAPPING_INVALID; + +- pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u", ++ pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u\n", + subflow->map_seq, subflow->map_subflow_seq, + subflow->map_data_len, subflow->map_csum_reqd, + subflow->map_data_csum); +@@ -1211,7 +1211,7 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb, + avail_len = skb->len - offset; + incr = limit >= avail_len ? 
avail_len + fin : limit; + +- pr_debug("discarding=%d len=%d offset=%d seq=%d", incr, skb->len, ++ pr_debug("discarding=%d len=%d offset=%d seq=%d\n", incr, skb->len, + offset, subflow->map_subflow_seq); + MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA); + tcp_sk(ssk)->copied_seq += incr; +@@ -1312,7 +1312,7 @@ static bool subflow_check_data_avail(struct sock *ssk) + + old_ack = READ_ONCE(msk->ack_seq); + ack_seq = mptcp_subflow_get_mapped_dsn(subflow); +- pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack, ++ pr_debug("msk ack_seq=%llx subflow ack_seq=%llx\n", old_ack, + ack_seq); + if (unlikely(before64(ack_seq, old_ack))) { + mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq); +@@ -1384,7 +1384,7 @@ bool mptcp_subflow_data_available(struct sock *sk) + subflow->map_valid = 0; + WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA); + +- pr_debug("Done with mapping: seq=%u data_len=%u", ++ pr_debug("Done with mapping: seq=%u data_len=%u\n", + subflow->map_subflow_seq, + subflow->map_data_len); + } +@@ -1494,7 +1494,7 @@ void mptcpv6_handle_mapped(struct sock *sk, bool mapped) + + target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk); + +- pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d", ++ pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d\n", + subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped); + + if (likely(icsk->icsk_af_ops == target)) +@@ -1589,7 +1589,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc, + goto failed; + + mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL); +- pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk, ++ pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d\n", msk, + remote_token, local_id, remote_id); + subflow->remote_token = remote_token; + WRITE_ONCE(subflow->remote_id, remote_id); +@@ -1727,7 +1727,7 @@ int mptcp_subflow_create_socket(struct sock *sk, unsigned short family, + SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid; + + subflow = mptcp_subflow_ctx(sf->sk); +- pr_debug("subflow=%p", subflow); ++ pr_debug("subflow=%p\n", subflow); + + *new_sock = sf; + sock_hold(sk); +@@ -1751,7 +1751,7 @@ static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk, + INIT_LIST_HEAD(&ctx->node); + INIT_LIST_HEAD(&ctx->delegated_node); + +- pr_debug("subflow=%p", ctx); ++ pr_debug("subflow=%p\n", ctx); + + ctx->tcp_sock = sk; + WRITE_ONCE(ctx->local_id, -1); +@@ -1902,7 +1902,7 @@ static int subflow_ulp_init(struct sock *sk) + goto out; + } + +- pr_debug("subflow=%p, family=%d", ctx, sk->sk_family); ++ pr_debug("subflow=%p, family=%d\n", ctx, sk->sk_family); + + tp->is_mptcp = 1; + ctx->icsk_af_ops = icsk->icsk_af_ops; +diff --git a/net/wireless/scan.c b/net/wireless/scan.c +index 74db51348a7ff..4d88e797ae49f 100644 +--- a/net/wireless/scan.c ++++ b/net/wireless/scan.c +@@ -1562,7 +1562,7 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy, + } + EXPORT_SYMBOL(cfg80211_get_bss); + +-static void rb_insert_bss(struct cfg80211_registered_device *rdev, ++static bool rb_insert_bss(struct cfg80211_registered_device *rdev, + struct cfg80211_internal_bss *bss) + { + struct rb_node **p = &rdev->bss_tree.rb_node; +@@ -1578,7 +1578,7 @@ static void rb_insert_bss(struct cfg80211_registered_device *rdev, + + if (WARN_ON(!cmp)) { + /* will sort of leak this BSS */ +- return; ++ return false; + } + + if (cmp < 0) +@@ -1589,6 +1589,7 @@ static void rb_insert_bss(struct cfg80211_registered_device *rdev, + + rb_link_node(&bss->rbn, 
parent, p); + rb_insert_color(&bss->rbn, &rdev->bss_tree); ++ return true; + } + + static struct cfg80211_internal_bss * +@@ -1615,6 +1616,34 @@ rb_find_bss(struct cfg80211_registered_device *rdev, + return NULL; + } + ++static void cfg80211_insert_bss(struct cfg80211_registered_device *rdev, ++ struct cfg80211_internal_bss *bss) ++{ ++ lockdep_assert_held(&rdev->bss_lock); ++ ++ if (!rb_insert_bss(rdev, bss)) ++ return; ++ list_add_tail(&bss->list, &rdev->bss_list); ++ rdev->bss_entries++; ++} ++ ++static void cfg80211_rehash_bss(struct cfg80211_registered_device *rdev, ++ struct cfg80211_internal_bss *bss) ++{ ++ lockdep_assert_held(&rdev->bss_lock); ++ ++ rb_erase(&bss->rbn, &rdev->bss_tree); ++ if (!rb_insert_bss(rdev, bss)) { ++ list_del(&bss->list); ++ if (!list_empty(&bss->hidden_list)) ++ list_del_init(&bss->hidden_list); ++ if (!list_empty(&bss->pub.nontrans_list)) ++ list_del_init(&bss->pub.nontrans_list); ++ rdev->bss_entries--; ++ } ++ rdev->bss_generation++; ++} ++ + static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev, + struct cfg80211_internal_bss *new) + { +@@ -1876,9 +1905,7 @@ __cfg80211_bss_update(struct cfg80211_registered_device *rdev, + bss_ref_get(rdev, bss_from_pub(tmp->pub.transmitted_bss)); + } + +- list_add_tail(&new->list, &rdev->bss_list); +- rdev->bss_entries++; +- rb_insert_bss(rdev, new); ++ cfg80211_insert_bss(rdev, new); + found = new; + } + +@@ -3111,19 +3138,14 @@ void cfg80211_update_assoc_bss_entry(struct wireless_dev *wdev, + if (!WARN_ON(!__cfg80211_unlink_bss(rdev, new))) + rdev->bss_generation++; + } +- +- rb_erase(&cbss->rbn, &rdev->bss_tree); +- rb_insert_bss(rdev, cbss); +- rdev->bss_generation++; ++ cfg80211_rehash_bss(rdev, cbss); + + list_for_each_entry_safe(nontrans_bss, tmp, + &cbss->pub.nontrans_list, + nontrans_list) { + bss = bss_from_pub(nontrans_bss); + bss->pub.channel = chan; +- rb_erase(&bss->rbn, &rdev->bss_tree); +- rb_insert_bss(rdev, bss); +- rdev->bss_generation++; ++ cfg80211_rehash_bss(rdev, bss); + } + + done: +diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c +index 63ddefb6ddd1c..23b2853ce3c42 100644 +--- a/security/apparmor/apparmorfs.c ++++ b/security/apparmor/apparmorfs.c +@@ -1698,6 +1698,10 @@ int __aafs_profile_mkdir(struct aa_profile *profile, struct dentry *parent) + struct aa_profile *p; + p = aa_deref_parent(profile); + dent = prof_dir(p); ++ if (!dent) { ++ error = -ENOENT; ++ goto fail2; ++ } + /* adding to parent that previously didn't have children */ + dent = aafs_create_dir("profiles", dent); + if (IS_ERR(dent)) +diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c +index 6b92e09d3f780..98c2bdbfcaed6 100644 +--- a/security/smack/smack_lsm.c ++++ b/security/smack/smack_lsm.c +@@ -4354,7 +4354,7 @@ static int smack_inet_conn_request(const struct sock *sk, struct sk_buff *skb, + rcu_read_unlock(); + + if (hskp == NULL) +- rc = netlbl_req_setattr(req, &skp->smk_netlabel); ++ rc = netlbl_req_setattr(req, &ssp->smk_out->smk_netlabel); + else + netlbl_req_delattr(req); + +diff --git a/sound/core/seq/seq_ports.h b/sound/core/seq/seq_ports.h +index b111382f697aa..9e36738c0dd04 100644 +--- a/sound/core/seq/seq_ports.h ++++ b/sound/core/seq/seq_ports.h +@@ -7,6 +7,7 @@ + #define __SND_SEQ_PORTS_H + + #include ++#include + #include "seq_lock.h" + + /* list of 'exported' ports */ +@@ -42,17 +43,6 @@ struct snd_seq_port_subs_info { + int (*close)(void *private_data, struct snd_seq_port_subscribe *info); + }; + +-/* context for converting from legacy 
control event to UMP packet */ +-struct snd_seq_ump_midi2_bank { +- bool rpn_set; +- bool nrpn_set; +- bool bank_set; +- unsigned char cc_rpn_msb, cc_rpn_lsb; +- unsigned char cc_nrpn_msb, cc_nrpn_lsb; +- unsigned char cc_data_msb, cc_data_lsb; +- unsigned char cc_bank_msb, cc_bank_lsb; +-}; +- + struct snd_seq_client_port { + + struct snd_seq_addr addr; /* client/port number */ +@@ -88,7 +78,7 @@ struct snd_seq_client_port { + unsigned char ump_group; + + #if IS_ENABLED(CONFIG_SND_SEQ_UMP) +- struct snd_seq_ump_midi2_bank midi2_bank[16]; /* per channel */ ++ struct ump_cvt_to_ump_bank midi2_bank[16]; /* per channel */ + #endif + }; + +diff --git a/sound/core/seq/seq_ump_convert.c b/sound/core/seq/seq_ump_convert.c +index d9dacfbe4a9ae..4dd540cbb1cbb 100644 +--- a/sound/core/seq/seq_ump_convert.c ++++ b/sound/core/seq/seq_ump_convert.c +@@ -368,7 +368,7 @@ static int cvt_ump_midi1_to_midi2(struct snd_seq_client *dest, + struct snd_seq_ump_event ev_cvt; + const union snd_ump_midi1_msg *midi1 = (const union snd_ump_midi1_msg *)event->ump; + union snd_ump_midi2_msg *midi2 = (union snd_ump_midi2_msg *)ev_cvt.ump; +- struct snd_seq_ump_midi2_bank *cc; ++ struct ump_cvt_to_ump_bank *cc; + + ev_cvt = *event; + memset(&ev_cvt.ump, 0, sizeof(ev_cvt.ump)); +@@ -789,28 +789,45 @@ static int paf_ev_to_ump_midi2(const struct snd_seq_event *event, + return 1; + } + ++static void reset_rpn(struct ump_cvt_to_ump_bank *cc) ++{ ++ cc->rpn_set = 0; ++ cc->nrpn_set = 0; ++ cc->cc_rpn_msb = cc->cc_rpn_lsb = 0; ++ cc->cc_data_msb = cc->cc_data_lsb = 0; ++ cc->cc_data_msb_set = cc->cc_data_lsb_set = 0; ++} ++ + /* set up the MIDI2 RPN/NRPN packet data from the parsed info */ +-static void fill_rpn(struct snd_seq_ump_midi2_bank *cc, +- union snd_ump_midi2_msg *data, +- unsigned char channel) ++static int fill_rpn(struct ump_cvt_to_ump_bank *cc, ++ union snd_ump_midi2_msg *data, ++ unsigned char channel, ++ bool flush) + { ++ if (!(cc->cc_data_lsb_set || cc->cc_data_msb_set)) ++ return 0; // skip ++ /* when not flushing, wait for complete data set */ ++ if (!flush && (!cc->cc_data_lsb_set || !cc->cc_data_msb_set)) ++ return 0; // skip ++ + if (cc->rpn_set) { + data->rpn.status = UMP_MSG_STATUS_RPN; + data->rpn.bank = cc->cc_rpn_msb; + data->rpn.index = cc->cc_rpn_lsb; +- cc->rpn_set = 0; +- cc->cc_rpn_msb = cc->cc_rpn_lsb = 0; +- } else { ++ } else if (cc->nrpn_set) { + data->rpn.status = UMP_MSG_STATUS_NRPN; + data->rpn.bank = cc->cc_nrpn_msb; + data->rpn.index = cc->cc_nrpn_lsb; +- cc->nrpn_set = 0; +- cc->cc_nrpn_msb = cc->cc_nrpn_lsb = 0; ++ } else { ++ return 0; // skip + } ++ + data->rpn.data = upscale_14_to_32bit((cc->cc_data_msb << 7) | + cc->cc_data_lsb); + data->rpn.channel = channel; +- cc->cc_data_msb = cc->cc_data_lsb = 0; ++ ++ reset_rpn(cc); ++ return 1; + } + + /* convert CC event to MIDI 2.0 UMP */ +@@ -822,29 +839,39 @@ static int cc_ev_to_ump_midi2(const struct snd_seq_event *event, + unsigned char channel = event->data.control.channel & 0x0f; + unsigned char index = event->data.control.param & 0x7f; + unsigned char val = event->data.control.value & 0x7f; +- struct snd_seq_ump_midi2_bank *cc = &dest_port->midi2_bank[channel]; ++ struct ump_cvt_to_ump_bank *cc = &dest_port->midi2_bank[channel]; ++ int ret; + + /* process special CC's (bank/rpn/nrpn) */ + switch (index) { + case UMP_CC_RPN_MSB: ++ ret = fill_rpn(cc, data, channel, true); + cc->rpn_set = 1; + cc->cc_rpn_msb = val; +- return 0; // skip ++ if (cc->cc_rpn_msb == 0x7f && cc->cc_rpn_lsb == 0x7f) ++ reset_rpn(cc); ++ return ret; + case 
UMP_CC_RPN_LSB: ++ ret = fill_rpn(cc, data, channel, true); + cc->rpn_set = 1; + cc->cc_rpn_lsb = val; +- return 0; // skip ++ if (cc->cc_rpn_msb == 0x7f && cc->cc_rpn_lsb == 0x7f) ++ reset_rpn(cc); ++ return ret; + case UMP_CC_NRPN_MSB: ++ ret = fill_rpn(cc, data, channel, true); + cc->nrpn_set = 1; + cc->cc_nrpn_msb = val; +- return 0; // skip ++ return ret; + case UMP_CC_NRPN_LSB: ++ ret = fill_rpn(cc, data, channel, true); + cc->nrpn_set = 1; + cc->cc_nrpn_lsb = val; +- return 0; // skip ++ return ret; + case UMP_CC_DATA: ++ cc->cc_data_msb_set = 1; + cc->cc_data_msb = val; +- return 0; // skip ++ return fill_rpn(cc, data, channel, false); + case UMP_CC_BANK_SELECT: + cc->bank_set = 1; + cc->cc_bank_msb = val; +@@ -854,11 +881,9 @@ static int cc_ev_to_ump_midi2(const struct snd_seq_event *event, + cc->cc_bank_lsb = val; + return 0; // skip + case UMP_CC_DATA_LSB: ++ cc->cc_data_lsb_set = 1; + cc->cc_data_lsb = val; +- if (!(cc->rpn_set || cc->nrpn_set)) +- return 0; // skip +- fill_rpn(cc, data, channel); +- return 1; ++ return fill_rpn(cc, data, channel, false); + } + + data->cc.status = status; +@@ -887,7 +912,7 @@ static int pgm_ev_to_ump_midi2(const struct snd_seq_event *event, + unsigned char status) + { + unsigned char channel = event->data.control.channel & 0x0f; +- struct snd_seq_ump_midi2_bank *cc = &dest_port->midi2_bank[channel]; ++ struct ump_cvt_to_ump_bank *cc = &dest_port->midi2_bank[channel]; + + data->pg.status = status; + data->pg.channel = channel; +@@ -924,8 +949,9 @@ static int ctrl14_ev_to_ump_midi2(const struct snd_seq_event *event, + { + unsigned char channel = event->data.control.channel & 0x0f; + unsigned char index = event->data.control.param & 0x7f; +- struct snd_seq_ump_midi2_bank *cc = &dest_port->midi2_bank[channel]; ++ struct ump_cvt_to_ump_bank *cc = &dest_port->midi2_bank[channel]; + unsigned char msb, lsb; ++ int ret; + + msb = (event->data.control.value >> 7) & 0x7f; + lsb = event->data.control.value & 0x7f; +@@ -939,28 +965,27 @@ static int ctrl14_ev_to_ump_midi2(const struct snd_seq_event *event, + cc->cc_bank_lsb = lsb; + return 0; // skip + case UMP_CC_RPN_MSB: +- cc->cc_rpn_msb = msb; +- fallthrough; + case UMP_CC_RPN_LSB: +- cc->rpn_set = 1; ++ ret = fill_rpn(cc, data, channel, true); ++ cc->cc_rpn_msb = msb; + cc->cc_rpn_lsb = lsb; +- return 0; // skip ++ cc->rpn_set = 1; ++ if (cc->cc_rpn_msb == 0x7f && cc->cc_rpn_lsb == 0x7f) ++ reset_rpn(cc); ++ return ret; + case UMP_CC_NRPN_MSB: +- cc->cc_nrpn_msb = msb; +- fallthrough; + case UMP_CC_NRPN_LSB: ++ ret = fill_rpn(cc, data, channel, true); ++ cc->cc_nrpn_msb = msb; + cc->nrpn_set = 1; + cc->cc_nrpn_lsb = lsb; +- return 0; // skip ++ return ret; + case UMP_CC_DATA: +- cc->cc_data_msb = msb; +- fallthrough; + case UMP_CC_DATA_LSB: ++ cc->cc_data_msb_set = cc->cc_data_lsb_set = 1; ++ cc->cc_data_msb = msb; + cc->cc_data_lsb = lsb; +- if (!(cc->rpn_set || cc->nrpn_set)) +- return 0; // skip +- fill_rpn(cc, data, channel); +- return 1; ++ return fill_rpn(cc, data, channel, false); + } + + data->cc.status = UMP_MSG_STATUS_CC; +diff --git a/sound/core/ump_convert.c b/sound/core/ump_convert.c +index f67c44c83fde4..0fe13d0316568 100644 +--- a/sound/core/ump_convert.c ++++ b/sound/core/ump_convert.c +@@ -287,25 +287,42 @@ static int cvt_legacy_system_to_ump(struct ump_cvt_to_ump *cvt, + return 4; + } + +-static void fill_rpn(struct ump_cvt_to_ump_bank *cc, +- union snd_ump_midi2_msg *midi2) ++static void reset_rpn(struct ump_cvt_to_ump_bank *cc) + { ++ cc->rpn_set = 0; ++ cc->nrpn_set = 0; ++ 
cc->cc_rpn_msb = cc->cc_rpn_lsb = 0; ++ cc->cc_data_msb = cc->cc_data_lsb = 0; ++ cc->cc_data_msb_set = cc->cc_data_lsb_set = 0; ++} ++ ++static int fill_rpn(struct ump_cvt_to_ump_bank *cc, ++ union snd_ump_midi2_msg *midi2, ++ bool flush) ++{ ++ if (!(cc->cc_data_lsb_set || cc->cc_data_msb_set)) ++ return 0; // skip ++ /* when not flushing, wait for complete data set */ ++ if (!flush && (!cc->cc_data_lsb_set || !cc->cc_data_msb_set)) ++ return 0; // skip ++ + if (cc->rpn_set) { + midi2->rpn.status = UMP_MSG_STATUS_RPN; + midi2->rpn.bank = cc->cc_rpn_msb; + midi2->rpn.index = cc->cc_rpn_lsb; +- cc->rpn_set = 0; +- cc->cc_rpn_msb = cc->cc_rpn_lsb = 0; +- } else { ++ } else if (cc->nrpn_set) { + midi2->rpn.status = UMP_MSG_STATUS_NRPN; + midi2->rpn.bank = cc->cc_nrpn_msb; + midi2->rpn.index = cc->cc_nrpn_lsb; +- cc->nrpn_set = 0; +- cc->cc_nrpn_msb = cc->cc_nrpn_lsb = 0; ++ } else { ++ return 0; // skip + } ++ + midi2->rpn.data = upscale_14_to_32bit((cc->cc_data_msb << 7) | + cc->cc_data_lsb); +- cc->cc_data_msb = cc->cc_data_lsb = 0; ++ ++ reset_rpn(cc); ++ return 1; + } + + /* convert to a MIDI 1.0 Channel Voice message */ +@@ -318,6 +335,7 @@ static int cvt_legacy_cmd_to_ump(struct ump_cvt_to_ump *cvt, + struct ump_cvt_to_ump_bank *cc; + union snd_ump_midi2_msg *midi2 = (union snd_ump_midi2_msg *)data; + unsigned char status, channel; ++ int ret; + + BUILD_BUG_ON(sizeof(union snd_ump_midi1_msg) != 4); + BUILD_BUG_ON(sizeof(union snd_ump_midi2_msg) != 8); +@@ -358,24 +376,33 @@ static int cvt_legacy_cmd_to_ump(struct ump_cvt_to_ump *cvt, + case UMP_MSG_STATUS_CC: + switch (buf[1]) { + case UMP_CC_RPN_MSB: ++ ret = fill_rpn(cc, midi2, true); + cc->rpn_set = 1; + cc->cc_rpn_msb = buf[2]; +- return 0; // skip ++ if (cc->cc_rpn_msb == 0x7f && cc->cc_rpn_lsb == 0x7f) ++ reset_rpn(cc); ++ return ret; + case UMP_CC_RPN_LSB: ++ ret = fill_rpn(cc, midi2, true); + cc->rpn_set = 1; + cc->cc_rpn_lsb = buf[2]; +- return 0; // skip ++ if (cc->cc_rpn_msb == 0x7f && cc->cc_rpn_lsb == 0x7f) ++ reset_rpn(cc); ++ return ret; + case UMP_CC_NRPN_MSB: ++ ret = fill_rpn(cc, midi2, true); + cc->nrpn_set = 1; + cc->cc_nrpn_msb = buf[2]; +- return 0; // skip ++ return ret; + case UMP_CC_NRPN_LSB: ++ ret = fill_rpn(cc, midi2, true); + cc->nrpn_set = 1; + cc->cc_nrpn_lsb = buf[2]; +- return 0; // skip ++ return ret; + case UMP_CC_DATA: ++ cc->cc_data_msb_set = 1; + cc->cc_data_msb = buf[2]; +- return 0; // skip ++ return fill_rpn(cc, midi2, false); + case UMP_CC_BANK_SELECT: + cc->bank_set = 1; + cc->cc_bank_msb = buf[2]; +@@ -385,12 +412,9 @@ static int cvt_legacy_cmd_to_ump(struct ump_cvt_to_ump *cvt, + cc->cc_bank_lsb = buf[2]; + return 0; // skip + case UMP_CC_DATA_LSB: ++ cc->cc_data_lsb_set = 1; + cc->cc_data_lsb = buf[2]; +- if (cc->rpn_set || cc->nrpn_set) +- fill_rpn(cc, midi2); +- else +- return 0; // skip +- break; ++ return fill_rpn(cc, midi2, false); + default: + midi2->cc.index = buf[1]; + midi2->cc.data = upscale_7_to_32bit(buf[2]); +diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c +index bf685d01259d3..d3ed3e21b1979 100644 +--- a/sound/pci/hda/hda_generic.c ++++ b/sound/pci/hda/hda_generic.c +@@ -4956,6 +4956,69 @@ void snd_hda_gen_stream_pm(struct hda_codec *codec, hda_nid_t nid, bool on) + } + EXPORT_SYMBOL_GPL(snd_hda_gen_stream_pm); + ++/* forcibly mute the speaker output without caching; return true if updated */ ++static bool force_mute_output_path(struct hda_codec *codec, hda_nid_t nid) ++{ ++ if (!nid) ++ return false; ++ if (!nid_has_mute(codec, nid, HDA_OUTPUT)) ++ 
return false; /* no mute, skip */ ++ if (snd_hda_codec_amp_read(codec, nid, 0, HDA_OUTPUT, 0) & ++ snd_hda_codec_amp_read(codec, nid, 1, HDA_OUTPUT, 0) & ++ HDA_AMP_MUTE) ++ return false; /* both channels already muted, skip */ ++ ++ /* direct amp update without caching */ ++ snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_AMP_GAIN_MUTE, ++ AC_AMP_SET_OUTPUT | AC_AMP_SET_LEFT | ++ AC_AMP_SET_RIGHT | HDA_AMP_MUTE); ++ return true; ++} ++ ++/** ++ * snd_hda_gen_shutup_speakers - Forcibly mute the speaker outputs ++ * @codec: the HDA codec ++ * ++ * Forcibly mute the speaker outputs, to be called at suspend or shutdown. ++ * ++ * The mute state done by this function isn't cached, hence the original state ++ * will be restored at resume. ++ * ++ * Return true if the mute state has been changed. ++ */ ++bool snd_hda_gen_shutup_speakers(struct hda_codec *codec) ++{ ++ struct hda_gen_spec *spec = codec->spec; ++ const int *paths; ++ const struct nid_path *path; ++ int i, p, num_paths; ++ bool updated = false; ++ ++ /* if already powered off, do nothing */ ++ if (!snd_hdac_is_power_on(&codec->core)) ++ return false; ++ ++ if (spec->autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT) { ++ paths = spec->out_paths; ++ num_paths = spec->autocfg.line_outs; ++ } else { ++ paths = spec->speaker_paths; ++ num_paths = spec->autocfg.speaker_outs; ++ } ++ ++ for (i = 0; i < num_paths; i++) { ++ path = snd_hda_get_path_from_idx(codec, paths[i]); ++ if (!path) ++ continue; ++ for (p = 0; p < path->depth; p++) ++ if (force_mute_output_path(codec, path->path[p])) ++ updated = true; ++ } ++ ++ return updated; ++} ++EXPORT_SYMBOL_GPL(snd_hda_gen_shutup_speakers); ++ + /** + * snd_hda_gen_parse_auto_config - Parse the given BIOS configuration and + * set up the hda_gen_spec +diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h +index a8eea83676299..aed4381f7a619 100644 +--- a/sound/pci/hda/hda_generic.h ++++ b/sound/pci/hda/hda_generic.h +@@ -355,5 +355,6 @@ int snd_hda_gen_add_mute_led_cdev(struct hda_codec *codec, + int snd_hda_gen_add_micmute_led_cdev(struct hda_codec *codec, + int (*callback)(struct led_classdev *, + enum led_brightness)); ++bool snd_hda_gen_shutup_speakers(struct hda_codec *codec); + + #endif /* __SOUND_HDA_GENERIC_H */ +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c +index af921364195e4..8396d1d93668c 100644 +--- a/sound/pci/hda/patch_conexant.c ++++ b/sound/pci/hda/patch_conexant.c +@@ -205,6 +205,8 @@ static void cx_auto_shutdown(struct hda_codec *codec) + { + struct conexant_spec *spec = codec->spec; + ++ snd_hda_gen_shutup_speakers(codec); ++ + /* Turn the problematic codec into D3 to avoid spurious noises + from the internal speaker during (and after) reboot */ + cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false); +diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c +index d597e59863ee3..f6c1dbd0ebcf5 100644 +--- a/sound/soc/amd/yc/acp6x-mach.c ++++ b/sound/soc/amd/yc/acp6x-mach.c +@@ -220,6 +220,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "21J6"), + } + }, ++ { ++ .driver_data = &acp6x_card, ++ .matches = { ++ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "21M3"), ++ } ++ }, + { + .driver_data = &acp6x_card, + .matches = { +@@ -430,6 +437,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { + DMI_MATCH(DMI_BOARD_NAME, "8A3E"), + } + }, ++ { ++ .driver_data = &acp6x_card, ++ .matches = { ++ DMI_MATCH(DMI_BOARD_VENDOR, "HP"), ++ 
DMI_MATCH(DMI_BOARD_NAME, "8B27"), ++ } ++ }, + { + .driver_data = &acp6x_card, + .matches = { +diff --git a/sound/soc/codecs/es8326.c b/sound/soc/codecs/es8326.c +index 6c263086c44d2..32a9b26ee2c89 100644 +--- a/sound/soc/codecs/es8326.c ++++ b/sound/soc/codecs/es8326.c +@@ -617,6 +617,8 @@ static void es8326_jack_detect_handler(struct work_struct *work) + es8326_disable_micbias(es8326->component); + if (es8326->jack->status & SND_JACK_HEADPHONE) { + dev_dbg(comp->dev, "Report hp remove event\n"); ++ snd_soc_jack_report(es8326->jack, 0, ++ SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2); + snd_soc_jack_report(es8326->jack, 0, SND_JACK_HEADSET); + /* mute adc when mic path switch */ + regmap_write(es8326->regmap, ES8326_ADC_SCALE, 0x33); +diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh +index 2be13dd19ddd2..6faff03acc110 100755 +--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh ++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh +@@ -21,6 +21,7 @@ cinfail="" + cinsent="" + tmpfile="" + cout="" ++err="" + capout="" + ns1="" + ns2="" +@@ -187,6 +188,7 @@ init() { + cin=$(mktemp) + cinsent=$(mktemp) + cout=$(mktemp) ++ err=$(mktemp) + evts_ns1=$(mktemp) + evts_ns2=$(mktemp) + +@@ -202,6 +204,7 @@ cleanup() + rm -f "$sin" "$sout" "$cinsent" "$cinfail" + rm -f "$tmpfile" + rm -rf $evts_ns1 $evts_ns2 ++ rm -f "$err" + cleanup_partial + } + +@@ -461,16 +464,17 @@ reset_with_fail() + fi + } + ++start_events() ++{ ++ mptcp_lib_events "${ns1}" "${evts_ns1}" evts_ns1_pid ++ mptcp_lib_events "${ns2}" "${evts_ns2}" evts_ns2_pid ++} ++ + reset_with_events() + { + reset "${1}" || return 1 + +- :> "$evts_ns1" +- :> "$evts_ns2" +- ip netns exec $ns1 ./pm_nl_ctl events >> "$evts_ns1" 2>&1 & +- evts_ns1_pid=$! +- ip netns exec $ns2 ./pm_nl_ctl events >> "$evts_ns2" 2>&1 & +- evts_ns2_pid=$! 
++ start_events + } + + reset_with_tcp_filter() +@@ -669,7 +673,9 @@ wait_mpj() + kill_events_pids() + { + mptcp_lib_kill_wait $evts_ns1_pid ++ evts_ns1_pid=0 + mptcp_lib_kill_wait $evts_ns2_pid ++ evts_ns2_pid=0 + } + + pm_nl_set_limits() +@@ -2891,13 +2897,6 @@ backup_tests() + fi + } + +-SUB_ESTABLISHED=10 # MPTCP_EVENT_SUB_ESTABLISHED +-LISTENER_CREATED=15 #MPTCP_EVENT_LISTENER_CREATED +-LISTENER_CLOSED=16 #MPTCP_EVENT_LISTENER_CLOSED +- +-AF_INET=2 +-AF_INET6=10 +- + verify_listener_events() + { + local evt=$1 +@@ -2911,9 +2910,9 @@ verify_listener_events() + local sport + local name + +- if [ $e_type = $LISTENER_CREATED ]; then ++ if [ $e_type = $MPTCP_LIB_EVENT_LISTENER_CREATED ]; then + name="LISTENER_CREATED" +- elif [ $e_type = $LISTENER_CLOSED ]; then ++ elif [ $e_type = $MPTCP_LIB_EVENT_LISTENER_CLOSED ]; then + name="LISTENER_CLOSED " + else + name="$e_type" +@@ -2980,8 +2979,10 @@ add_addr_ports_tests() + chk_add_nr 1 1 1 + chk_rm_nr 1 1 invert + +- verify_listener_events $evts_ns1 $LISTENER_CREATED $AF_INET 10.0.2.1 10100 +- verify_listener_events $evts_ns1 $LISTENER_CLOSED $AF_INET 10.0.2.1 10100 ++ verify_listener_events $evts_ns1 $MPTCP_LIB_EVENT_LISTENER_CREATED \ ++ $MPTCP_LIB_AF_INET 10.0.2.1 10100 ++ verify_listener_events $evts_ns1 $MPTCP_LIB_EVENT_LISTENER_CLOSED \ ++ $MPTCP_LIB_AF_INET 10.0.2.1 10100 + kill_events_pids + fi + +@@ -3422,6 +3423,107 @@ userspace_pm_rm_sf() + wait_rm_sf $1 "${cnt}" + } + ++check_output() ++{ ++ local cmd="$1" ++ local expected="$2" ++ local msg="$3" ++ local rc=0 ++ ++ mptcp_lib_check_output "${err}" "${cmd}" "${expected}" || rc=${?} ++ if [ ${rc} -eq 2 ]; then ++ fail_test "fail to check output # error ${rc}" ++ elif [ ${rc} -eq 0 ]; then ++ print_ok ++ elif [ ${rc} -eq 1 ]; then ++ fail_test "fail to check output # different output" ++ fi ++} ++ ++# $1: ns ++userspace_pm_dump() ++{ ++ local evts=$evts_ns1 ++ local tk ++ ++ [ "$1" == "$ns2" ] && evts=$evts_ns2 ++ tk=$(mptcp_lib_evts_get_info token "$evts") ++ ++ ip netns exec $1 ./pm_nl_ctl dump token $tk ++} ++ ++# $1: ns ; $2: id ++userspace_pm_get_addr() ++{ ++ local evts=$evts_ns1 ++ local tk ++ ++ [ "$1" == "$ns2" ] && evts=$evts_ns2 ++ tk=$(mptcp_lib_evts_get_info token "$evts") ++ ++ ip netns exec $1 ./pm_nl_ctl get $2 token $tk ++} ++ ++userspace_pm_chk_dump_addr() ++{ ++ local ns="${1}" ++ local exp="${2}" ++ local check="${3}" ++ ++ print_check "dump addrs ${check}" ++ ++ if false && mptcp_lib_kallsyms_has "mptcp_userspace_pm_dump_addr$"; then ++ check_output "userspace_pm_dump ${ns}" "${exp}" ++ else ++ print_skip ++ fi ++} ++ ++userspace_pm_chk_get_addr() ++{ ++ local ns="${1}" ++ local id="${2}" ++ local exp="${3}" ++ ++ print_check "get id ${id} addr" ++ ++ if false && mptcp_lib_kallsyms_has "mptcp_userspace_pm_get_addr$"; then ++ check_output "userspace_pm_get_addr ${ns} ${id}" "${exp}" ++ else ++ print_skip ++ fi ++} ++ ++# $1: ns ; $2: event type ; $3: count ++chk_evt_nr() ++{ ++ local ns=${1} ++ local evt_name="${2}" ++ local exp="${3}" ++ ++ local evts="${evts_ns1}" ++ local evt="${!evt_name}" ++ local count ++ ++ evt_name="${evt_name:16}" # without MPTCP_LIB_EVENT_ ++ [ "${ns}" == "ns2" ] && evts="${evts_ns2}" ++ ++ print_check "event ${ns} ${evt_name} (${exp})" ++ ++ if [[ "${evt_name}" = "LISTENER_"* ]] && ++ ! 
mptcp_lib_kallsyms_has "mptcp_event_pm_listener$"; then ++ print_skip "event not supported" ++ return ++ fi ++ ++ count=$(grep -cw "type:${evt}" "${evts}") ++ if [ "${count}" != "${exp}" ]; then ++ fail_test "got ${count} events, expected ${exp}" ++ else ++ print_ok ++ fi ++} ++ + userspace_tests() + { + # userspace pm type prevents add_addr +@@ -3513,11 +3615,17 @@ userspace_tests() + chk_mptcp_info subflows 2 subflows 2 + chk_subflows_total 3 3 + chk_mptcp_info add_addr_signal 2 add_addr_accepted 2 +- userspace_pm_rm_addr $ns1 10 +- userspace_pm_rm_sf $ns1 "::ffff:10.0.2.1" $SUB_ESTABLISHED ++ userspace_pm_chk_dump_addr "${ns1}" \ ++ $'id 10 flags signal 10.0.2.1\nid 20 flags signal 10.0.3.1' \ ++ "signal" ++ userspace_pm_chk_get_addr "${ns1}" "10" "id 10 flags signal 10.0.2.1" ++ userspace_pm_chk_get_addr "${ns1}" "20" "id 20 flags signal 10.0.3.1" ++ userspace_pm_rm_sf $ns1 "::ffff:10.0.2.1" $MPTCP_LIB_EVENT_SUB_ESTABLISHED ++ userspace_pm_chk_dump_addr "${ns1}" \ ++ "id 20 flags signal 10.0.3.1" "after rm_sf 10" + userspace_pm_rm_addr $ns1 20 +- userspace_pm_rm_sf $ns1 10.0.3.1 $SUB_ESTABLISHED +- chk_rm_nr 2 2 invert ++ userspace_pm_chk_dump_addr "${ns1}" "" "after rm_addr 20" ++ chk_rm_nr 1 1 invert + chk_mptcp_info subflows 0 subflows 0 + chk_subflows_total 1 1 + kill_events_pids +@@ -3537,11 +3645,38 @@ userspace_tests() + chk_join_nr 1 1 1 + chk_mptcp_info subflows 1 subflows 1 + chk_subflows_total 2 2 +- userspace_pm_rm_addr $ns2 20 +- userspace_pm_rm_sf $ns2 10.0.3.2 $SUB_ESTABLISHED +- chk_rm_nr 1 1 ++ userspace_pm_chk_dump_addr "${ns2}" \ ++ "id 20 flags subflow 10.0.3.2" \ ++ "subflow" ++ userspace_pm_chk_get_addr "${ns2}" "20" "id 20 flags subflow 10.0.3.2" ++ userspace_pm_rm_sf $ns2 10.0.3.2 $MPTCP_LIB_EVENT_SUB_ESTABLISHED ++ userspace_pm_chk_dump_addr "${ns2}" \ ++ "" \ ++ "after rm_sf 20" ++ chk_rm_nr 0 1 ++ chk_mptcp_info subflows 0 subflows 0 ++ chk_subflows_total 1 1 ++ kill_events_pids ++ mptcp_lib_kill_wait $tests_pid ++ fi ++ ++ # userspace pm create id 0 subflow ++ if reset_with_events "userspace pm create id 0 subflow" && ++ continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then ++ set_userspace_pm $ns2 ++ pm_nl_set_limits $ns1 0 1 ++ speed=5 \ ++ run_tests $ns1 $ns2 10.0.1.1 & ++ local tests_pid=$! 
++ wait_mpj $ns2 + chk_mptcp_info subflows 0 subflows 0 + chk_subflows_total 1 1 ++ userspace_pm_add_sf $ns2 10.0.3.2 0 ++ userspace_pm_chk_dump_addr "${ns2}" \ ++ "id 0 flags subflow 10.0.3.2" "id 0 subflow" ++ chk_join_nr 1 1 1 ++ chk_mptcp_info subflows 1 subflows 1 ++ chk_subflows_total 2 2 + kill_events_pids + mptcp_lib_kill_wait $tests_pid + fi +@@ -3578,6 +3713,7 @@ endpoint_tests() + + if reset_with_tcp_filter "delete and re-add" ns2 10.0.3.2 REJECT OUTPUT && + mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then ++ start_events + pm_nl_set_limits $ns1 0 3 + pm_nl_set_limits $ns2 0 3 + pm_nl_add_endpoint $ns2 10.0.1.2 id 1 dev ns2eth1 flags subflow +@@ -3629,9 +3765,129 @@ endpoint_tests() + + mptcp_lib_kill_wait $tests_pid + ++ kill_events_pids ++ chk_evt_nr ns1 MPTCP_LIB_EVENT_LISTENER_CREATED 1 ++ chk_evt_nr ns1 MPTCP_LIB_EVENT_CREATED 1 ++ chk_evt_nr ns1 MPTCP_LIB_EVENT_ESTABLISHED 1 ++ chk_evt_nr ns1 MPTCP_LIB_EVENT_ANNOUNCED 0 ++ chk_evt_nr ns1 MPTCP_LIB_EVENT_REMOVED 4 ++ chk_evt_nr ns1 MPTCP_LIB_EVENT_SUB_ESTABLISHED 6 ++ chk_evt_nr ns1 MPTCP_LIB_EVENT_SUB_CLOSED 4 ++ ++ chk_evt_nr ns2 MPTCP_LIB_EVENT_CREATED 1 ++ chk_evt_nr ns2 MPTCP_LIB_EVENT_ESTABLISHED 1 ++ chk_evt_nr ns2 MPTCP_LIB_EVENT_ANNOUNCED 0 ++ chk_evt_nr ns2 MPTCP_LIB_EVENT_REMOVED 0 ++ chk_evt_nr ns2 MPTCP_LIB_EVENT_SUB_ESTABLISHED 6 ++ chk_evt_nr ns2 MPTCP_LIB_EVENT_SUB_CLOSED 5 # one has been closed before estab ++ + chk_join_nr 6 6 6 + chk_rm_nr 4 4 + fi ++ ++ # remove and re-add ++ if reset_with_events "delete re-add signal" && ++ mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then ++ pm_nl_set_limits $ns1 0 3 ++ pm_nl_set_limits $ns2 3 3 ++ pm_nl_add_endpoint $ns1 10.0.2.1 id 1 flags signal ++ # broadcast IP: no packet for this address will be received on ns1 ++ pm_nl_add_endpoint $ns1 224.0.0.1 id 2 flags signal ++ pm_nl_add_endpoint $ns1 10.0.1.1 id 42 flags signal ++ test_linkfail=4 speed=5 \ ++ run_tests $ns1 $ns2 10.0.1.1 & ++ local tests_pid=$! 
++ ++ wait_mpj $ns2 ++ pm_nl_check_endpoint "creation" \ ++ $ns1 10.0.2.1 id 1 flags signal ++ chk_subflow_nr "before delete" 2 ++ chk_mptcp_info subflows 1 subflows 1 ++ ++ pm_nl_del_endpoint $ns1 1 10.0.2.1 ++ pm_nl_del_endpoint $ns1 2 224.0.0.1 ++ sleep 0.5 ++ chk_subflow_nr "after delete" 1 ++ chk_mptcp_info subflows 0 subflows 0 ++ ++ pm_nl_add_endpoint $ns1 10.0.2.1 id 1 flags signal ++ pm_nl_add_endpoint $ns1 10.0.3.1 id 2 flags signal ++ wait_mpj $ns2 ++ chk_subflow_nr "after re-add" 3 ++ chk_mptcp_info subflows 2 subflows 2 ++ ++ pm_nl_del_endpoint $ns1 42 10.0.1.1 ++ sleep 0.5 ++ chk_subflow_nr "after delete ID 0" 2 ++ chk_mptcp_info subflows 2 subflows 2 ++ ++ pm_nl_add_endpoint $ns1 10.0.1.1 id 99 flags signal ++ wait_mpj $ns2 ++ chk_subflow_nr "after re-add ID 0" 3 ++ chk_mptcp_info subflows 3 subflows 3 ++ ++ pm_nl_del_endpoint $ns1 99 10.0.1.1 ++ sleep 0.5 ++ chk_subflow_nr "after re-delete ID 0" 2 ++ chk_mptcp_info subflows 2 subflows 2 ++ ++ pm_nl_add_endpoint $ns1 10.0.1.1 id 88 flags signal ++ wait_mpj $ns2 ++ chk_subflow_nr "after re-re-add ID 0" 3 ++ chk_mptcp_info subflows 3 subflows 3 ++ mptcp_lib_kill_wait $tests_pid ++ ++ kill_events_pids ++ chk_evt_nr ns1 MPTCP_LIB_EVENT_LISTENER_CREATED 1 ++ chk_evt_nr ns1 MPTCP_LIB_EVENT_CREATED 1 ++ chk_evt_nr ns1 MPTCP_LIB_EVENT_ESTABLISHED 1 ++ chk_evt_nr ns1 MPTCP_LIB_EVENT_ANNOUNCED 0 ++ chk_evt_nr ns1 MPTCP_LIB_EVENT_REMOVED 0 ++ chk_evt_nr ns1 MPTCP_LIB_EVENT_SUB_ESTABLISHED 5 ++ chk_evt_nr ns1 MPTCP_LIB_EVENT_SUB_CLOSED 3 ++ ++ chk_evt_nr ns2 MPTCP_LIB_EVENT_CREATED 1 ++ chk_evt_nr ns2 MPTCP_LIB_EVENT_ESTABLISHED 1 ++ chk_evt_nr ns2 MPTCP_LIB_EVENT_ANNOUNCED 6 ++ chk_evt_nr ns2 MPTCP_LIB_EVENT_REMOVED 4 ++ chk_evt_nr ns2 MPTCP_LIB_EVENT_SUB_ESTABLISHED 5 ++ chk_evt_nr ns2 MPTCP_LIB_EVENT_SUB_CLOSED 3 ++ ++ chk_join_nr 5 5 5 ++ chk_add_nr 6 6 ++ chk_rm_nr 4 3 invert ++ fi ++ ++ # flush and re-add ++ if reset_with_tcp_filter "flush re-add" ns2 10.0.3.2 REJECT OUTPUT && ++ mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then ++ pm_nl_set_limits $ns1 0 2 ++ pm_nl_set_limits $ns2 1 2 ++ # broadcast IP: no packet for this address will be received on ns1 ++ pm_nl_add_endpoint $ns1 224.0.0.1 id 2 flags signal ++ pm_nl_add_endpoint $ns2 10.0.3.2 id 3 flags subflow ++ test_linkfail=4 speed=20 \ ++ run_tests $ns1 $ns2 10.0.1.1 & ++ local tests_pid=$! 
++ ++ wait_attempt_fail $ns2 ++ chk_subflow_nr "before flush" 1 ++ chk_mptcp_info subflows 0 subflows 0 ++ ++ pm_nl_flush_endpoint $ns2 ++ pm_nl_flush_endpoint $ns1 ++ wait_rm_addr $ns2 0 ++ ip netns exec "${ns2}" ${iptables} -D OUTPUT -s "10.0.3.2" -p tcp -j REJECT ++ pm_nl_add_endpoint $ns2 10.0.3.2 id 3 flags subflow ++ wait_mpj $ns2 ++ pm_nl_add_endpoint $ns1 10.0.3.1 id 2 flags signal ++ wait_mpj $ns2 ++ mptcp_lib_kill_wait $tests_pid ++ ++ chk_join_nr 2 2 2 ++ chk_add_nr 2 2 ++ chk_rm_nr 1 0 invert ++ fi + } + + # [$1: error message] +diff --git a/tools/testing/selftests/net/mptcp/mptcp_lib.sh b/tools/testing/selftests/net/mptcp/mptcp_lib.sh +index 8939d5c135a0e..869c8eda4bc34 100644 +--- a/tools/testing/selftests/net/mptcp/mptcp_lib.sh ++++ b/tools/testing/selftests/net/mptcp/mptcp_lib.sh +@@ -8,6 +8,32 @@ readonly KSFT_SKIP=4 + # shellcheck disable=SC2155 # declare and assign separately + readonly KSFT_TEST="${MPTCP_LIB_KSFT_TEST:-$(basename "${0}" .sh)}" + ++# These variables are used in some selftests, read-only ++declare -rx MPTCP_LIB_EVENT_ANNOUNCED=6 # MPTCP_EVENT_ANNOUNCED ++declare -rx MPTCP_LIB_EVENT_REMOVED=7 # MPTCP_EVENT_REMOVED ++declare -rx MPTCP_LIB_EVENT_SUB_ESTABLISHED=10 # MPTCP_EVENT_SUB_ESTABLISHED ++declare -rx MPTCP_LIB_EVENT_SUB_CLOSED=11 # MPTCP_EVENT_SUB_CLOSED ++declare -rx MPTCP_LIB_EVENT_LISTENER_CREATED=15 # MPTCP_EVENT_LISTENER_CREATED ++declare -rx MPTCP_LIB_EVENT_LISTENER_CLOSED=16 # MPTCP_EVENT_LISTENER_CLOSED ++ ++declare -rx MPTCP_LIB_AF_INET=2 ++declare -rx MPTCP_LIB_AF_INET6=10 ++ ++# These variables are used in some selftests, read-only ++declare -rx MPTCP_LIB_EVENT_CREATED=1 # MPTCP_EVENT_CREATED ++declare -rx MPTCP_LIB_EVENT_ESTABLISHED=2 # MPTCP_EVENT_ESTABLISHED ++declare -rx MPTCP_LIB_EVENT_CLOSED=3 # MPTCP_EVENT_CLOSED ++declare -rx MPTCP_LIB_EVENT_ANNOUNCED=6 # MPTCP_EVENT_ANNOUNCED ++declare -rx MPTCP_LIB_EVENT_REMOVED=7 # MPTCP_EVENT_REMOVED ++declare -rx MPTCP_LIB_EVENT_SUB_ESTABLISHED=10 # MPTCP_EVENT_SUB_ESTABLISHED ++declare -rx MPTCP_LIB_EVENT_SUB_CLOSED=11 # MPTCP_EVENT_SUB_CLOSED ++declare -rx MPTCP_LIB_EVENT_SUB_PRIORITY=13 # MPTCP_EVENT_SUB_PRIORITY ++declare -rx MPTCP_LIB_EVENT_LISTENER_CREATED=15 # MPTCP_EVENT_LISTENER_CREATED ++declare -rx MPTCP_LIB_EVENT_LISTENER_CLOSED=16 # MPTCP_EVENT_LISTENER_CLOSED ++ ++declare -rx MPTCP_LIB_AF_INET=2 ++declare -rx MPTCP_LIB_AF_INET6=10 ++ + MPTCP_LIB_SUBTESTS=() + + # only if supported (or forced) and not disabled, see no-color.org +@@ -247,3 +273,15 @@ mptcp_lib_get_counter() { + + echo "${count}" + } ++ ++mptcp_lib_events() { ++ local ns="${1}" ++ local evts="${2}" ++ declare -n pid="${3}" ++ ++ :>"${evts}" ++ ++ mptcp_lib_kill_wait "${pid:-0}" ++ ip netns exec "${ns}" ./pm_nl_ctl events >> "${evts}" 2>&1 & ++ pid=$! ++} +diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh +index 4e58291550498..c5d7af8e8efde 100755 +--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh ++++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh +@@ -23,15 +23,15 @@ if ! 
ip -Version &> /dev/null; then + exit ${KSFT_SKIP} + fi + +-ANNOUNCED=6 # MPTCP_EVENT_ANNOUNCED +-REMOVED=7 # MPTCP_EVENT_REMOVED +-SUB_ESTABLISHED=10 # MPTCP_EVENT_SUB_ESTABLISHED +-SUB_CLOSED=11 # MPTCP_EVENT_SUB_CLOSED +-LISTENER_CREATED=15 #MPTCP_EVENT_LISTENER_CREATED +-LISTENER_CLOSED=16 #MPTCP_EVENT_LISTENER_CLOSED ++ANNOUNCED=${MPTCP_LIB_EVENT_ANNOUNCED} ++REMOVED=${MPTCP_LIB_EVENT_REMOVED} ++SUB_ESTABLISHED=${MPTCP_LIB_EVENT_SUB_ESTABLISHED} ++SUB_CLOSED=${MPTCP_LIB_EVENT_SUB_CLOSED} ++LISTENER_CREATED=${MPTCP_LIB_EVENT_LISTENER_CREATED} ++LISTENER_CLOSED=${MPTCP_LIB_EVENT_LISTENER_CLOSED} + +-AF_INET=2 +-AF_INET6=10 ++AF_INET=${MPTCP_LIB_AF_INET} ++AF_INET6=${MPTCP_LIB_AF_INET6} + + file="" + server_evts="" +@@ -201,21 +201,11 @@ make_connection() + if [ -z "$client_evts" ]; then + client_evts=$(mktemp) + fi +- :>"$client_evts" +- if [ $client_evts_pid -ne 0 ]; then +- mptcp_lib_kill_wait $client_evts_pid +- fi +- ip netns exec "$ns2" ./pm_nl_ctl events >> "$client_evts" 2>&1 & +- client_evts_pid=$! ++ mptcp_lib_events "${ns2}" "${client_evts}" client_evts_pid + if [ -z "$server_evts" ]; then + server_evts=$(mktemp) + fi +- :>"$server_evts" +- if [ $server_evts_pid -ne 0 ]; then +- mptcp_lib_kill_wait $server_evts_pid +- fi +- ip netns exec "$ns1" ./pm_nl_ctl events >> "$server_evts" 2>&1 & +- server_evts_pid=$! ++ mptcp_lib_events "${ns1}" "${server_evts}" server_evts_pid + sleep 0.5 + + # Run the server diff --git a/patch/kernel/archive/odroidxu4-6.6/patch-6.6.50-51.patch b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.50-51.patch new file mode 100644 index 000000000000..535938cbe4a3 --- /dev/null +++ b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.50-51.patch @@ -0,0 +1,12247 @@ +diff --git a/MAINTAINERS b/MAINTAINERS +index f09415b2b3c5c..ae4c0cec50736 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -13702,7 +13702,7 @@ M: Mathieu Desnoyers + M: "Paul E. 
McKenney" + L: linux-kernel@vger.kernel.org + S: Supported +-F: arch/powerpc/include/asm/membarrier.h ++F: arch/*/include/asm/membarrier.h + F: include/uapi/linux/membarrier.h + F: kernel/sched/membarrier.c + +diff --git a/Makefile b/Makefile +index f7efbb59c9865..6dea0c2163682 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 6 +-SUBLEVEL = 50 ++SUBLEVEL = 51 + EXTRAVERSION = + NAME = Hurr durr I'ma ninja sloth + +diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h +index 16b02f44c7d31..d657b84b6bf70 100644 +--- a/arch/arm/include/asm/pgtable.h ++++ b/arch/arm/include/asm/pgtable.h +@@ -151,6 +151,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + + extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; + ++#define pgdp_get(pgpd) READ_ONCE(*pgdp) ++ + #define pud_page(pud) pmd_page(__pmd(pud_val(pud))) + #define pud_write(pud) pmd_write(__pmd(pud_val(pud))) + +diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h +index 6792a1f83f2ad..a407f9cd549ed 100644 +--- a/arch/arm64/include/asm/acpi.h ++++ b/arch/arm64/include/asm/acpi.h +@@ -119,6 +119,18 @@ static inline u32 get_acpi_id_for_cpu(unsigned int cpu) + return acpi_cpu_get_madt_gicc(cpu)->uid; + } + ++static inline int get_cpu_for_acpi_id(u32 uid) ++{ ++ int cpu; ++ ++ for (cpu = 0; cpu < nr_cpu_ids; cpu++) ++ if (acpi_cpu_get_madt_gicc(cpu) && ++ uid == get_acpi_id_for_cpu(cpu)) ++ return cpu; ++ ++ return -EINVAL; ++} ++ + static inline void arch_fix_phys_package_id(int num, u32 slot) { } + void __init acpi_init_cpus(void); + int apei_claim_sea(struct pt_regs *regs); +diff --git a/arch/arm64/kernel/acpi_numa.c b/arch/arm64/kernel/acpi_numa.c +index ccbff21ce1faf..2465f291c7e17 100644 +--- a/arch/arm64/kernel/acpi_numa.c ++++ b/arch/arm64/kernel/acpi_numa.c +@@ -34,17 +34,6 @@ int __init acpi_numa_get_nid(unsigned int cpu) + return acpi_early_node_map[cpu]; + } + +-static inline int get_cpu_for_acpi_id(u32 uid) +-{ +- int cpu; +- +- for (cpu = 0; cpu < nr_cpu_ids; cpu++) +- if (uid == get_acpi_id_for_cpu(cpu)) +- return cpu; +- +- return -EINVAL; +-} +- + static int __init acpi_parse_gicc_pxm(union acpi_subtable_headers *header, + const unsigned long end) + { +diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c +index 1acfa704c8d09..0eddd4a66b874 100644 +--- a/arch/loongarch/kernel/relocate.c ++++ b/arch/loongarch/kernel/relocate.c +@@ -13,6 +13,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -170,7 +171,7 @@ unsigned long __init relocate_kernel(void) + unsigned long kernel_length; + unsigned long random_offset = 0; + void *location_new = _text; /* Default to original kernel start */ +- char *cmdline = early_ioremap(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */ ++ char *cmdline = early_memremap_ro(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */ + + strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE); + +@@ -182,6 +183,7 @@ unsigned long __init relocate_kernel(void) + random_offset = (unsigned long)location_new - (unsigned long)(_text); + #endif + reloc_offset = (unsigned long)_text - VMLINUX_LOAD_ADDRESS; ++ early_memunmap(cmdline, COMMAND_LINE_SIZE); + + if (random_offset) { + kernel_length = (long)(_end) - (long)(_text); +diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c +index 368e8475870f0..5f6e9e2ebbdbb 100644 +--- a/arch/mips/kernel/cevt-r4k.c ++++ 
b/arch/mips/kernel/cevt-r4k.c +@@ -303,13 +303,6 @@ int r4k_clockevent_init(void) + if (!c0_compare_int_usable()) + return -ENXIO; + +- /* +- * With vectored interrupts things are getting platform specific. +- * get_c0_compare_int is a hook to allow a platform to return the +- * interrupt number of its liking. +- */ +- irq = get_c0_compare_int(); +- + cd = &per_cpu(mips_clockevent_device, cpu); + + cd->name = "MIPS"; +@@ -320,7 +313,6 @@ int r4k_clockevent_init(void) + min_delta = calculate_min_delta(); + + cd->rating = 300; +- cd->irq = irq; + cd->cpumask = cpumask_of(cpu); + cd->set_next_event = mips_next_event; + cd->event_handler = mips_event_handler; +@@ -332,6 +324,13 @@ int r4k_clockevent_init(void) + + cp0_timer_irq_installed = 1; + ++ /* ++ * With vectored interrupts things are getting platform specific. ++ * get_c0_compare_int is a hook to allow a platform to return the ++ * interrupt number of its liking. ++ */ ++ irq = get_c0_compare_int(); ++ + if (request_irq(irq, c0_compare_interrupt, flags, "timer", + c0_compare_interrupt)) + pr_err("Failed to request irq %d (timer)\n", irq); +diff --git a/arch/powerpc/include/asm/nohash/mmu-e500.h b/arch/powerpc/include/asm/nohash/mmu-e500.h +index 6ddced0415cb5..7dc24b8632d7c 100644 +--- a/arch/powerpc/include/asm/nohash/mmu-e500.h ++++ b/arch/powerpc/include/asm/nohash/mmu-e500.h +@@ -303,8 +303,7 @@ extern unsigned long linear_map_top; + extern int book3e_htw_mode; + + #define PPC_HTW_NONE 0 +-#define PPC_HTW_IBM 1 +-#define PPC_HTW_E6500 2 ++#define PPC_HTW_E6500 1 + + /* + * 64-bit booke platforms don't load the tlb in the tlb miss handler code. +diff --git a/arch/powerpc/kernel/vdso/vdso32.lds.S b/arch/powerpc/kernel/vdso/vdso32.lds.S +index 426e1ccc6971a..8f57107000a24 100644 +--- a/arch/powerpc/kernel/vdso/vdso32.lds.S ++++ b/arch/powerpc/kernel/vdso/vdso32.lds.S +@@ -74,6 +74,8 @@ SECTIONS + .got : { *(.got) } :text + .plt : { *(.plt) } + ++ .rela.dyn : { *(.rela .rela*) } ++ + _end = .; + __end = .; + PROVIDE(end = .); +@@ -87,7 +89,7 @@ SECTIONS + *(.branch_lt) + *(.data .data.* .gnu.linkonce.d.* .sdata*) + *(.bss .sbss .dynbss .dynsbss) +- *(.got1 .glink .iplt .rela*) ++ *(.got1 .glink .iplt) + } + } + +diff --git a/arch/powerpc/kernel/vdso/vdso64.lds.S b/arch/powerpc/kernel/vdso/vdso64.lds.S +index bda6c8cdd459c..400819258c06b 100644 +--- a/arch/powerpc/kernel/vdso/vdso64.lds.S ++++ b/arch/powerpc/kernel/vdso/vdso64.lds.S +@@ -69,7 +69,7 @@ SECTIONS + .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr + .eh_frame : { KEEP (*(.eh_frame)) } :text + .gcc_except_table : { *(.gcc_except_table) } +- .rela.dyn ALIGN(8) : { *(.rela.dyn) } ++ .rela.dyn ALIGN(8) : { *(.rela .rela*) } + + .got ALIGN(8) : { *(.got .toc) } + +@@ -86,7 +86,7 @@ SECTIONS + *(.data .data.* .gnu.linkonce.d.* .sdata*) + *(.bss .sbss .dynbss .dynsbss) + *(.opd) +- *(.glink .iplt .plt .rela*) ++ *(.glink .iplt .plt) + } + } + +diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c +index 6dd2f46bd3ef6..8830267789c9c 100644 +--- a/arch/powerpc/lib/qspinlock.c ++++ b/arch/powerpc/lib/qspinlock.c +@@ -715,7 +715,15 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b + } + + release: +- qnodesp->count--; /* release the node */ ++ /* ++ * Clear the lock before releasing the node, as another CPU might see stale ++ * values if an interrupt occurs after we increment qnodesp->count ++ * but before node->lock is initialized. 
The barrier ensures that ++ * there are no further stores to the node after it has been released. ++ */ ++ node->lock = NULL; ++ barrier(); ++ qnodesp->count--; + } + + void queued_spin_lock_slowpath(struct qspinlock *lock) +diff --git a/arch/powerpc/mm/nohash/Makefile b/arch/powerpc/mm/nohash/Makefile +index f3894e79d5f70..24b445a5fcacc 100644 +--- a/arch/powerpc/mm/nohash/Makefile ++++ b/arch/powerpc/mm/nohash/Makefile +@@ -3,7 +3,7 @@ + ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) + + obj-y += mmu_context.o tlb.o tlb_low.o kup.o +-obj-$(CONFIG_PPC_BOOK3E_64) += tlb_low_64e.o book3e_pgtable.o ++obj-$(CONFIG_PPC_BOOK3E_64) += tlb_64e.o tlb_low_64e.o book3e_pgtable.o + obj-$(CONFIG_40x) += 40x.o + obj-$(CONFIG_44x) += 44x.o + obj-$(CONFIG_PPC_8xx) += 8xx.o +diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c +index 5ffa0af4328af..f57dc721d0636 100644 +--- a/arch/powerpc/mm/nohash/tlb.c ++++ b/arch/powerpc/mm/nohash/tlb.c +@@ -110,28 +110,6 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { + }; + #endif + +-/* The variables below are currently only used on 64-bit Book3E +- * though this will probably be made common with other nohash +- * implementations at some point +- */ +-#ifdef CONFIG_PPC64 +- +-int mmu_pte_psize; /* Page size used for PTE pages */ +-int mmu_vmemmap_psize; /* Page size used for the virtual mem map */ +-int book3e_htw_mode; /* HW tablewalk? Value is PPC_HTW_* */ +-unsigned long linear_map_top; /* Top of linear mapping */ +- +- +-/* +- * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug +- * exceptions. This is used for bolted and e6500 TLB miss handlers which +- * do not modify this SPRG in the TLB miss code; for other TLB miss handlers, +- * this is set to zero. +- */ +-int extlb_level_exc; +- +-#endif /* CONFIG_PPC64 */ +- + #ifdef CONFIG_PPC_E500 + /* next_tlbcam_idx is used to round-robin tlbcam entry assignment */ + DEFINE_PER_CPU(int, next_tlbcam_idx); +@@ -358,381 +336,7 @@ void tlb_flush(struct mmu_gather *tlb) + flush_tlb_mm(tlb->mm); + } + +-/* +- * Below are functions specific to the 64-bit variant of Book3E though that +- * may change in the future +- */ +- +-#ifdef CONFIG_PPC64 +- +-/* +- * Handling of virtual linear page tables or indirect TLB entries +- * flushing when PTE pages are freed +- */ +-void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address) +-{ +- int tsize = mmu_psize_defs[mmu_pte_psize].enc; +- +- if (book3e_htw_mode != PPC_HTW_NONE) { +- unsigned long start = address & PMD_MASK; +- unsigned long end = address + PMD_SIZE; +- unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift; +- +- /* This isn't the most optimal, ideally we would factor out the +- * while preempt & CPU mask mucking around, or even the IPI but +- * it will do for now +- */ +- while (start < end) { +- __flush_tlb_page(tlb->mm, start, tsize, 1); +- start += size; +- } +- } else { +- unsigned long rmask = 0xf000000000000000ul; +- unsigned long rid = (address & rmask) | 0x1000000000000000ul; +- unsigned long vpte = address & ~rmask; +- +- vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful; +- vpte |= rid; +- __flush_tlb_page(tlb->mm, vpte, tsize, 0); +- } +-} +- +-static void __init setup_page_sizes(void) +-{ +- unsigned int tlb0cfg; +- unsigned int tlb0ps; +- unsigned int eptcfg; +- int i, psize; +- +-#ifdef CONFIG_PPC_E500 +- unsigned int mmucfg = mfspr(SPRN_MMUCFG); +- int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E); +- +- if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) { +- unsigned int tlb1cfg = 
mfspr(SPRN_TLB1CFG); +- unsigned int min_pg, max_pg; +- +- min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT; +- max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT; +- +- for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { +- struct mmu_psize_def *def; +- unsigned int shift; +- +- def = &mmu_psize_defs[psize]; +- shift = def->shift; +- +- if (shift == 0 || shift & 1) +- continue; +- +- /* adjust to be in terms of 4^shift Kb */ +- shift = (shift - 10) >> 1; +- +- if ((shift >= min_pg) && (shift <= max_pg)) +- def->flags |= MMU_PAGE_SIZE_DIRECT; +- } +- +- goto out; +- } +- +- if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) { +- u32 tlb1cfg, tlb1ps; +- +- tlb0cfg = mfspr(SPRN_TLB0CFG); +- tlb1cfg = mfspr(SPRN_TLB1CFG); +- tlb1ps = mfspr(SPRN_TLB1PS); +- eptcfg = mfspr(SPRN_EPTCFG); +- +- if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT)) +- book3e_htw_mode = PPC_HTW_E6500; +- +- /* +- * We expect 4K subpage size and unrestricted indirect size. +- * The lack of a restriction on indirect size is a Freescale +- * extension, indicated by PSn = 0 but SPSn != 0. +- */ +- if (eptcfg != 2) +- book3e_htw_mode = PPC_HTW_NONE; +- +- for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { +- struct mmu_psize_def *def = &mmu_psize_defs[psize]; +- +- if (!def->shift) +- continue; +- +- if (tlb1ps & (1U << (def->shift - 10))) { +- def->flags |= MMU_PAGE_SIZE_DIRECT; +- +- if (book3e_htw_mode && psize == MMU_PAGE_2M) +- def->flags |= MMU_PAGE_SIZE_INDIRECT; +- } +- } +- +- goto out; +- } +-#endif +- +- tlb0cfg = mfspr(SPRN_TLB0CFG); +- tlb0ps = mfspr(SPRN_TLB0PS); +- eptcfg = mfspr(SPRN_EPTCFG); +- +- /* Look for supported direct sizes */ +- for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { +- struct mmu_psize_def *def = &mmu_psize_defs[psize]; +- +- if (tlb0ps & (1U << (def->shift - 10))) +- def->flags |= MMU_PAGE_SIZE_DIRECT; +- } +- +- /* Indirect page sizes supported ? */ +- if ((tlb0cfg & TLBnCFG_IND) == 0 || +- (tlb0cfg & TLBnCFG_PT) == 0) +- goto out; +- +- book3e_htw_mode = PPC_HTW_IBM; +- +- /* Now, we only deal with one IND page size for each +- * direct size. Hopefully all implementations today are +- * unambiguous, but we might want to be careful in the +- * future. +- */ +- for (i = 0; i < 3; i++) { +- unsigned int ps, sps; +- +- sps = eptcfg & 0x1f; +- eptcfg >>= 5; +- ps = eptcfg & 0x1f; +- eptcfg >>= 5; +- if (!ps || !sps) +- continue; +- for (psize = 0; psize < MMU_PAGE_COUNT; psize++) { +- struct mmu_psize_def *def = &mmu_psize_defs[psize]; +- +- if (ps == (def->shift - 10)) +- def->flags |= MMU_PAGE_SIZE_INDIRECT; +- if (sps == (def->shift - 10)) +- def->ind = ps + 10; +- } +- } +- +-out: +- /* Cleanup array and print summary */ +- pr_info("MMU: Supported page sizes\n"); +- for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { +- struct mmu_psize_def *def = &mmu_psize_defs[psize]; +- const char *__page_type_names[] = { +- "unsupported", +- "direct", +- "indirect", +- "direct & indirect" +- }; +- if (def->flags == 0) { +- def->shift = 0; +- continue; +- } +- pr_info(" %8ld KB as %s\n", 1ul << (def->shift - 10), +- __page_type_names[def->flags & 0x3]); +- } +-} +- +-static void __init setup_mmu_htw(void) +-{ +- /* +- * If we want to use HW tablewalk, enable it by patching the TLB miss +- * handlers to branch to the one dedicated to it. 
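[Editor's sketch, not part of the patch: the direct-size probe in setup_page_sizes() reduces to a one-line predicate. The helper name and standalone form below are illustrative; the test itself mirrors the `tlbps & (1U << (shift - 10))` checks visible above.]

	/*
	 * TLBnPS advertises supported direct page sizes as one bit per
	 * power-of-two kilobyte, so a 2^shift byte page maps to bit
	 * (shift - 10).
	 */
	static bool page_size_direct(u32 tlbnps, unsigned int shift)
	{
		return shift >= 10 && (tlbnps & (1U << (shift - 10)));
	}
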
+- */ +- +- switch (book3e_htw_mode) { +- case PPC_HTW_IBM: +- patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e); +- patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e); +- break; +-#ifdef CONFIG_PPC_E500 +- case PPC_HTW_E6500: +- extlb_level_exc = EX_TLB_SIZE; +- patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e); +- patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e); +- break; +-#endif +- } +- pr_info("MMU: Book3E HW tablewalk %s\n", +- book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported"); +-} +- +-/* +- * Early initialization of the MMU TLB code +- */ +-static void early_init_this_mmu(void) +-{ +- unsigned int mas4; +- +- /* Set MAS4 based on page table setting */ +- +- mas4 = 0x4 << MAS4_WIMGED_SHIFT; +- switch (book3e_htw_mode) { +- case PPC_HTW_E6500: +- mas4 |= MAS4_INDD; +- mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT; +- mas4 |= MAS4_TLBSELD(1); +- mmu_pte_psize = MMU_PAGE_2M; +- break; +- +- case PPC_HTW_IBM: +- mas4 |= MAS4_INDD; +- mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT; +- mmu_pte_psize = MMU_PAGE_1M; +- break; +- +- case PPC_HTW_NONE: +- mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT; +- mmu_pte_psize = mmu_virtual_psize; +- break; +- } +- mtspr(SPRN_MAS4, mas4); +- +-#ifdef CONFIG_PPC_E500 +- if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { +- unsigned int num_cams; +- bool map = true; +- +- /* use a quarter of the TLBCAM for bolted linear map */ +- num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4; +- +- /* +- * Only do the mapping once per core, or else the +- * transient mapping would cause problems. +- */ +-#ifdef CONFIG_SMP +- if (hweight32(get_tensr()) > 1) +- map = false; +-#endif +- +- if (map) +- linear_map_top = map_mem_in_cams(linear_map_top, +- num_cams, false, true); +- } +-#endif +- +- /* A sync won't hurt us after mucking around with +- * the MMU configuration +- */ +- mb(); +-} +- +-static void __init early_init_mmu_global(void) +-{ +- /* XXX This should be decided at runtime based on supported +- * page sizes in the TLB, but for now let's assume 16M is +- * always there and a good fit (which it probably is) +- * +- * Freescale booke only supports 4K pages in TLB0, so use that. +- */ +- if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) +- mmu_vmemmap_psize = MMU_PAGE_4K; +- else +- mmu_vmemmap_psize = MMU_PAGE_16M; +- +- /* XXX This code only checks for TLB 0 capabilities and doesn't +- * check what page size combos are supported by the HW. It +- * also doesn't handle the case where a separate array holds +- * the IND entries from the array loaded by the PT. +- */ +- /* Look for supported page sizes */ +- setup_page_sizes(); +- +- /* Look for HW tablewalk support */ +- setup_mmu_htw(); +- +-#ifdef CONFIG_PPC_E500 +- if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { +- if (book3e_htw_mode == PPC_HTW_NONE) { +- extlb_level_exc = EX_TLB_SIZE; +- patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e); +- patch_exception(0x1e0, +- exc_instruction_tlb_miss_bolted_book3e); +- } +- } +-#endif +- +- /* Set the global containing the top of the linear mapping +- * for use by the TLB miss code +- */ +- linear_map_top = memblock_end_of_DRAM(); +- +- ioremap_bot = IOREMAP_BASE; +-} +- +-static void __init early_mmu_set_memory_limit(void) +-{ +-#ifdef CONFIG_PPC_E500 +- if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { +- /* +- * Limit memory so we dont have linear faults. +- * Unlike memblock_set_current_limit, which limits +- * memory available during early boot, this permanently +- * reduces the memory available to Linux. 
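[Editor's sketch: the bolted linear-map sizing that recurs in this file is a fixed fraction of the TLBCAM. Helper name is hypothetical; the register access matches the code above and assumes the usual kernel headers.]

	/* Reserve a quarter of the TLBCAM entries for the bolted linear map. */
	static unsigned int bolted_linear_map_cams(void)
	{
		return (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
	}
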
We need to +- * do this because highmem is not supported on 64-bit. +- */ +- memblock_enforce_memory_limit(linear_map_top); +- } +-#endif +- +- memblock_set_current_limit(linear_map_top); +-} +- +-/* boot cpu only */ +-void __init early_init_mmu(void) +-{ +- early_init_mmu_global(); +- early_init_this_mmu(); +- early_mmu_set_memory_limit(); +-} +- +-void early_init_mmu_secondary(void) +-{ +- early_init_this_mmu(); +-} +- +-void setup_initial_memory_limit(phys_addr_t first_memblock_base, +- phys_addr_t first_memblock_size) +-{ +- /* On non-FSL Embedded 64-bit, we adjust the RMA size to match +- * the bolted TLB entry. We know for now that only 1G +- * entries are supported though that may eventually +- * change. +- * +- * on FSL Embedded 64-bit, usually all RAM is bolted, but with +- * unusual memory sizes it's possible for some RAM to not be mapped +- * (such RAM is not used at all by Linux, since we don't support +- * highmem on 64-bit). We limit ppc64_rma_size to what would be +- * mappable if this memblock is the only one. Additional memblocks +- * can only increase, not decrease, the amount that ends up getting +- * mapped. We still limit max to 1G even if we'll eventually map +- * more. This is due to what the early init code is set up to do. +- * +- * We crop it to the size of the first MEMBLOCK to +- * avoid going over total available memory just in case... +- */ +-#ifdef CONFIG_PPC_E500 +- if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { +- unsigned long linear_sz; +- unsigned int num_cams; +- +- /* use a quarter of the TLBCAM for bolted linear map */ +- num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4; +- +- linear_sz = map_mem_in_cams(first_memblock_size, num_cams, +- true, true); +- +- ppc64_rma_size = min_t(u64, linear_sz, 0x40000000); +- } else +-#endif +- ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000); +- +- /* Finally limit subsequent allocations */ +- memblock_set_current_limit(first_memblock_base + ppc64_rma_size); +-} +-#else /* ! CONFIG_PPC64 */ ++#ifndef CONFIG_PPC64 + void __init early_init_mmu(void) + { + unsigned long root = of_get_flat_dt_root(); +diff --git a/arch/powerpc/mm/nohash/tlb_64e.c b/arch/powerpc/mm/nohash/tlb_64e.c +new file mode 100644 +index 0000000000000..b6af3ec4d001d +--- /dev/null ++++ b/arch/powerpc/mm/nohash/tlb_64e.c +@@ -0,0 +1,361 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Copyright 2008,2009 Ben Herrenschmidt ++ * IBM Corp. ++ * ++ * Derived from arch/ppc/mm/init.c: ++ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) ++ * ++ * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) ++ * and Cort Dougan (PReP) (cort@cs.nmt.edu) ++ * Copyright (C) 1996 Paul Mackerras ++ * ++ * Derived from "arch/i386/mm/init.c" ++ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++/* The variables below are currently only used on 64-bit Book3E ++ * though this will probably be made common with other nohash ++ * implementations at some point ++ */ ++static int mmu_pte_psize; /* Page size used for PTE pages */ ++int mmu_vmemmap_psize; /* Page size used for the virtual mem map */ ++int book3e_htw_mode; /* HW tablewalk? Value is PPC_HTW_* */ ++unsigned long linear_map_top; /* Top of linear mapping */ ++ ++ ++/* ++ * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug ++ * exceptions. 
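[Editor's sketch: the RMA sizing in setup_initial_memory_limit() above is a simple clamp against the 1G ceiling the comment describes. Function name and standalone form are illustrative only.]

	/* RMA is whatever got mapped from the first memblock, capped at 1G. */
	static u64 rma_size(u64 mapped_or_first_memblock)
	{
		return min_t(u64, mapped_or_first_memblock, 0x40000000ULL);
	}
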
This is used for bolted and e6500 TLB miss handlers which ++ * do not modify this SPRG in the TLB miss code; for other TLB miss handlers, ++ * this is set to zero. ++ */ ++int extlb_level_exc; ++ ++/* ++ * Handling of virtual linear page tables or indirect TLB entries ++ * flushing when PTE pages are freed ++ */ ++void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address) ++{ ++ int tsize = mmu_psize_defs[mmu_pte_psize].enc; ++ ++ if (book3e_htw_mode != PPC_HTW_NONE) { ++ unsigned long start = address & PMD_MASK; ++ unsigned long end = address + PMD_SIZE; ++ unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift; ++ ++ /* This isn't the most optimal, ideally we would factor out the ++ * while preempt & CPU mask mucking around, or even the IPI but ++ * it will do for now ++ */ ++ while (start < end) { ++ __flush_tlb_page(tlb->mm, start, tsize, 1); ++ start += size; ++ } ++ } else { ++ unsigned long rmask = 0xf000000000000000ul; ++ unsigned long rid = (address & rmask) | 0x1000000000000000ul; ++ unsigned long vpte = address & ~rmask; ++ ++ vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful; ++ vpte |= rid; ++ __flush_tlb_page(tlb->mm, vpte, tsize, 0); ++ } ++} ++ ++static void __init setup_page_sizes(void) ++{ ++ unsigned int tlb0cfg; ++ unsigned int eptcfg; ++ int psize; ++ ++#ifdef CONFIG_PPC_E500 ++ unsigned int mmucfg = mfspr(SPRN_MMUCFG); ++ int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E); ++ ++ if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) { ++ unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG); ++ unsigned int min_pg, max_pg; ++ ++ min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT; ++ max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT; ++ ++ for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { ++ struct mmu_psize_def *def; ++ unsigned int shift; ++ ++ def = &mmu_psize_defs[psize]; ++ shift = def->shift; ++ ++ if (shift == 0 || shift & 1) ++ continue; ++ ++ /* adjust to be in terms of 4^shift Kb */ ++ shift = (shift - 10) >> 1; ++ ++ if ((shift >= min_pg) && (shift <= max_pg)) ++ def->flags |= MMU_PAGE_SIZE_DIRECT; ++ } ++ ++ goto out; ++ } ++ ++ if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) { ++ u32 tlb1cfg, tlb1ps; ++ ++ tlb0cfg = mfspr(SPRN_TLB0CFG); ++ tlb1cfg = mfspr(SPRN_TLB1CFG); ++ tlb1ps = mfspr(SPRN_TLB1PS); ++ eptcfg = mfspr(SPRN_EPTCFG); ++ ++ if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT)) ++ book3e_htw_mode = PPC_HTW_E6500; ++ ++ /* ++ * We expect 4K subpage size and unrestricted indirect size. ++ * The lack of a restriction on indirect size is a Freescale ++ * extension, indicated by PSn = 0 but SPSn != 0. 
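[Editor's sketch: the virtual-PTE address arithmetic in tlb_flush_pgtable() above is worth unpacking. This is the same computation in standalone form; it assumes 8-byte PTEs (hence the `>> (PAGE_SHIFT - 3)` scaling) and the fixed region constants used above. The helper name is hypothetical.]

	/*
	 * Locate the PTE covering @addr inside the virtual linear page
	 * table: page number times 8 bytes per PTE, placed in the VPT
	 * region derived from the faulting quadrant, with the low 12
	 * index bits cleared so the flush targets a whole PTE page.
	 */
	static unsigned long vlpt_pte_address(unsigned long addr)
	{
		unsigned long rmask = 0xf000000000000000ul;
		unsigned long rid = (addr & rmask) | 0x1000000000000000ul;
		unsigned long vpte = addr & ~rmask;

		return ((vpte >> (PAGE_SHIFT - 3)) & ~0xffful) | rid;
	}
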
++ */ ++ if (eptcfg != 2) ++ book3e_htw_mode = PPC_HTW_NONE; ++ ++ for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { ++ struct mmu_psize_def *def = &mmu_psize_defs[psize]; ++ ++ if (!def->shift) ++ continue; ++ ++ if (tlb1ps & (1U << (def->shift - 10))) { ++ def->flags |= MMU_PAGE_SIZE_DIRECT; ++ ++ if (book3e_htw_mode && psize == MMU_PAGE_2M) ++ def->flags |= MMU_PAGE_SIZE_INDIRECT; ++ } ++ } ++ ++ goto out; ++ } ++#endif ++out: ++ /* Cleanup array and print summary */ ++ pr_info("MMU: Supported page sizes\n"); ++ for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { ++ struct mmu_psize_def *def = &mmu_psize_defs[psize]; ++ const char *__page_type_names[] = { ++ "unsupported", ++ "direct", ++ "indirect", ++ "direct & indirect" ++ }; ++ if (def->flags == 0) { ++ def->shift = 0; ++ continue; ++ } ++ pr_info(" %8ld KB as %s\n", 1ul << (def->shift - 10), ++ __page_type_names[def->flags & 0x3]); ++ } ++} ++ ++static void __init setup_mmu_htw(void) ++{ ++ /* ++ * If we want to use HW tablewalk, enable it by patching the TLB miss ++ * handlers to branch to the one dedicated to it. ++ */ ++ ++ switch (book3e_htw_mode) { ++#ifdef CONFIG_PPC_E500 ++ case PPC_HTW_E6500: ++ extlb_level_exc = EX_TLB_SIZE; ++ patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e); ++ patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e); ++ break; ++#endif ++ } ++ pr_info("MMU: Book3E HW tablewalk %s\n", ++ book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported"); ++} ++ ++/* ++ * Early initialization of the MMU TLB code ++ */ ++static void early_init_this_mmu(void) ++{ ++ unsigned int mas4; ++ ++ /* Set MAS4 based on page table setting */ ++ ++ mas4 = 0x4 << MAS4_WIMGED_SHIFT; ++ switch (book3e_htw_mode) { ++ case PPC_HTW_E6500: ++ mas4 |= MAS4_INDD; ++ mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT; ++ mas4 |= MAS4_TLBSELD(1); ++ mmu_pte_psize = MMU_PAGE_2M; ++ break; ++ ++ case PPC_HTW_NONE: ++ mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT; ++ mmu_pte_psize = mmu_virtual_psize; ++ break; ++ } ++ mtspr(SPRN_MAS4, mas4); ++ ++#ifdef CONFIG_PPC_E500 ++ if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { ++ unsigned int num_cams; ++ bool map = true; ++ ++ /* use a quarter of the TLBCAM for bolted linear map */ ++ num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4; ++ ++ /* ++ * Only do the mapping once per core, or else the ++ * transient mapping would cause problems. ++ */ ++#ifdef CONFIG_SMP ++ if (hweight32(get_tensr()) > 1) ++ map = false; ++#endif ++ ++ if (map) ++ linear_map_top = map_mem_in_cams(linear_map_top, ++ num_cams, false, true); ++ } ++#endif ++ ++ /* A sync won't hurt us after mucking around with ++ * the MMU configuration ++ */ ++ mb(); ++} ++ ++static void __init early_init_mmu_global(void) ++{ ++ /* XXX This should be decided at runtime based on supported ++ * page sizes in the TLB, but for now let's assume 16M is ++ * always there and a good fit (which it probably is) ++ * ++ * Freescale booke only supports 4K pages in TLB0, so use that. ++ */ ++ if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) ++ mmu_vmemmap_psize = MMU_PAGE_4K; ++ else ++ mmu_vmemmap_psize = MMU_PAGE_16M; ++ ++ /* XXX This code only checks for TLB 0 capabilities and doesn't ++ * check what page size combos are supported by the HW. It ++ * also doesn't handle the case where a separate array holds ++ * the IND entries from the array loaded by the PT. 
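[Editor's sketch: condensing the MAV2 hardware-tablewalk gate shown above. Indirect entries must be supported in TLB1, page-table walking in TLB0, and EPTCFG must report the expected 4K subpage layout (the code sets PPC_HTW_E6500 and then falls back to PPC_HTW_NONE when eptcfg != 2). Predicate form is illustrative.]

	/* e6500-style HW tablewalk is usable only with this exact combo. */
	static bool e6500_htw_usable(u32 tlb0cfg, u32 tlb1cfg, u32 eptcfg)
	{
		return (tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT) &&
		       eptcfg == 2;
	}
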
++ */ ++ /* Look for supported page sizes */ ++ setup_page_sizes(); ++ ++ /* Look for HW tablewalk support */ ++ setup_mmu_htw(); ++ ++#ifdef CONFIG_PPC_E500 ++ if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { ++ if (book3e_htw_mode == PPC_HTW_NONE) { ++ extlb_level_exc = EX_TLB_SIZE; ++ patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e); ++ patch_exception(0x1e0, ++ exc_instruction_tlb_miss_bolted_book3e); ++ } ++ } ++#endif ++ ++ /* Set the global containing the top of the linear mapping ++ * for use by the TLB miss code ++ */ ++ linear_map_top = memblock_end_of_DRAM(); ++ ++ ioremap_bot = IOREMAP_BASE; ++} ++ ++static void __init early_mmu_set_memory_limit(void) ++{ ++#ifdef CONFIG_PPC_E500 ++ if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { ++ /* ++ * Limit memory so we dont have linear faults. ++ * Unlike memblock_set_current_limit, which limits ++ * memory available during early boot, this permanently ++ * reduces the memory available to Linux. We need to ++ * do this because highmem is not supported on 64-bit. ++ */ ++ memblock_enforce_memory_limit(linear_map_top); ++ } ++#endif ++ ++ memblock_set_current_limit(linear_map_top); ++} ++ ++/* boot cpu only */ ++void __init early_init_mmu(void) ++{ ++ early_init_mmu_global(); ++ early_init_this_mmu(); ++ early_mmu_set_memory_limit(); ++} ++ ++void early_init_mmu_secondary(void) ++{ ++ early_init_this_mmu(); ++} ++ ++void setup_initial_memory_limit(phys_addr_t first_memblock_base, ++ phys_addr_t first_memblock_size) ++{ ++ /* On non-FSL Embedded 64-bit, we adjust the RMA size to match ++ * the bolted TLB entry. We know for now that only 1G ++ * entries are supported though that may eventually ++ * change. ++ * ++ * on FSL Embedded 64-bit, usually all RAM is bolted, but with ++ * unusual memory sizes it's possible for some RAM to not be mapped ++ * (such RAM is not used at all by Linux, since we don't support ++ * highmem on 64-bit). We limit ppc64_rma_size to what would be ++ * mappable if this memblock is the only one. Additional memblocks ++ * can only increase, not decrease, the amount that ends up getting ++ * mapped. We still limit max to 1G even if we'll eventually map ++ * more. This is due to what the early init code is set up to do. ++ * ++ * We crop it to the size of the first MEMBLOCK to ++ * avoid going over total available memory just in case... 
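[Editor's sketch: the vmemmap page-size selection in early_init_mmu_global() above is a two-way choice. Helper name is hypothetical; the feature check matches the code.]

	/* Freescale booke TLB0 only takes 4K pages; otherwise assume 16M. */
	static int vmemmap_psize(void)
	{
		return mmu_has_feature(MMU_FTR_TYPE_FSL_E) ? MMU_PAGE_4K
							   : MMU_PAGE_16M;
	}
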
++ */ ++#ifdef CONFIG_PPC_E500 ++ if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { ++ unsigned long linear_sz; ++ unsigned int num_cams; ++ ++ /* use a quarter of the TLBCAM for bolted linear map */ ++ num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4; ++ ++ linear_sz = map_mem_in_cams(first_memblock_size, num_cams, ++ true, true); ++ ++ ppc64_rma_size = min_t(u64, linear_sz, 0x40000000); ++ } else ++#endif ++ ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000); ++ ++ /* Finally limit subsequent allocations */ ++ memblock_set_current_limit(first_memblock_base + ppc64_rma_size); ++} +diff --git a/arch/powerpc/mm/nohash/tlb_low_64e.S b/arch/powerpc/mm/nohash/tlb_low_64e.S +index 7e0b8fe1c2797..b0eb3f7eaed14 100644 +--- a/arch/powerpc/mm/nohash/tlb_low_64e.S ++++ b/arch/powerpc/mm/nohash/tlb_low_64e.S +@@ -893,201 +893,6 @@ virt_page_table_tlb_miss_whacko_fault: + TLB_MISS_EPILOG_ERROR + b exc_data_storage_book3e + +- +-/************************************************************** +- * * +- * TLB miss handling for Book3E with hw page table support * +- * * +- **************************************************************/ +- +- +-/* Data TLB miss */ +- START_EXCEPTION(data_tlb_miss_htw) +- TLB_MISS_PROLOG +- +- /* Now we handle the fault proper. We only save DEAR in normal +- * fault case since that's the only interesting values here. +- * We could probably also optimize by not saving SRR0/1 in the +- * linear mapping case but I'll leave that for later +- */ +- mfspr r14,SPRN_ESR +- mfspr r16,SPRN_DEAR /* get faulting address */ +- srdi r11,r16,44 /* get region */ +- xoris r11,r11,0xc +- cmpldi cr0,r11,0 /* linear mapping ? */ +- beq tlb_load_linear /* yes -> go to linear map load */ +- cmpldi cr1,r11,1 /* vmalloc mapping ? */ +- +- /* We do the user/kernel test for the PID here along with the RW test +- */ +- srdi. r11,r16,60 /* Check for user region */ +- ld r15,PACAPGD(r13) /* Load user pgdir */ +- beq htw_tlb_miss +- +- /* XXX replace the RMW cycles with immediate loads + writes */ +-1: mfspr r10,SPRN_MAS1 +- rlwinm r10,r10,0,16,1 /* Clear TID */ +- mtspr SPRN_MAS1,r10 +- ld r15,PACA_KERNELPGD(r13) /* Load kernel pgdir */ +- beq+ cr1,htw_tlb_miss +- +- /* We got a crappy address, just fault with whatever DEAR and ESR +- * are here +- */ +- TLB_MISS_EPILOG_ERROR +- b exc_data_storage_book3e +- +-/* Instruction TLB miss */ +- START_EXCEPTION(instruction_tlb_miss_htw) +- TLB_MISS_PROLOG +- +- /* If we take a recursive fault, the second level handler may need +- * to know whether we are handling a data or instruction fault in +- * order to get to the right store fault handler. We provide that +- * info by keeping a crazy value for ESR in r14 +- */ +- li r14,-1 /* store to exception frame is done later */ +- +- /* Now we handle the fault proper. We only save DEAR in the non +- * linear mapping case since we know the linear mapping case will +- * not re-enter. We could indeed optimize and also not save SRR0/1 +- * in the linear mapping case but I'll leave that for later +- * +- * Faulting address is SRR0 which is already in r16 +- */ +- srdi r11,r16,44 /* get region */ +- xoris r11,r11,0xc +- cmpldi cr0,r11,0 /* linear mapping ? */ +- beq tlb_load_linear /* yes -> go to linear map load */ +- cmpldi cr1,r11,1 /* vmalloc mapping ? */ +- +- /* We do the user/kernel test for the PID here along with the RW test +- */ +- srdi. 
r11,r16,60 /* Check for user region */ +- ld r15,PACAPGD(r13) /* Load user pgdir */ +- beq htw_tlb_miss +- +- /* XXX replace the RMW cycles with immediate loads + writes */ +-1: mfspr r10,SPRN_MAS1 +- rlwinm r10,r10,0,16,1 /* Clear TID */ +- mtspr SPRN_MAS1,r10 +- ld r15,PACA_KERNELPGD(r13) /* Load kernel pgdir */ +- beq+ htw_tlb_miss +- +- /* We got a crappy address, just fault */ +- TLB_MISS_EPILOG_ERROR +- b exc_instruction_storage_book3e +- +- +-/* +- * This is the guts of the second-level TLB miss handler for direct +- * misses. We are entered with: +- * +- * r16 = virtual page table faulting address +- * r15 = PGD pointer +- * r14 = ESR +- * r13 = PACA +- * r12 = TLB exception frame in PACA +- * r11 = crap (free to use) +- * r10 = crap (free to use) +- * +- * It can be re-entered by the linear mapping miss handler. However, to +- * avoid too much complication, it will save/restore things for us +- */ +-htw_tlb_miss: +-#ifdef CONFIG_PPC_KUAP +- mfspr r10,SPRN_MAS1 +- rlwinm. r10,r10,0,0x3fff0000 +- beq- htw_tlb_miss_fault /* KUAP fault */ +-#endif +- /* Search if we already have a TLB entry for that virtual address, and +- * if we do, bail out. +- * +- * MAS1:IND should be already set based on MAS4 +- */ +- PPC_TLBSRX_DOT(0,R16) +- beq htw_tlb_miss_done +- +- /* Now, we need to walk the page tables. First check if we are in +- * range. +- */ +- rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4 +- bne- htw_tlb_miss_fault +- +- /* Get the PGD pointer */ +- cmpldi cr0,r15,0 +- beq- htw_tlb_miss_fault +- +- /* Get to PGD entry */ +- rldicl r11,r16,64-(PGDIR_SHIFT-3),64-PGD_INDEX_SIZE-3 +- clrrdi r10,r11,3 +- ldx r15,r10,r15 +- cmpdi cr0,r15,0 +- bge htw_tlb_miss_fault +- +- /* Get to PUD entry */ +- rldicl r11,r16,64-(PUD_SHIFT-3),64-PUD_INDEX_SIZE-3 +- clrrdi r10,r11,3 +- ldx r15,r10,r15 +- cmpdi cr0,r15,0 +- bge htw_tlb_miss_fault +- +- /* Get to PMD entry */ +- rldicl r11,r16,64-(PMD_SHIFT-3),64-PMD_INDEX_SIZE-3 +- clrrdi r10,r11,3 +- ldx r15,r10,r15 +- cmpdi cr0,r15,0 +- bge htw_tlb_miss_fault +- +- /* Ok, we're all right, we can now create an indirect entry for +- * a 1M or 256M page. +- * +- * The last trick is now that because we use "half" pages for +- * the HTW (1M IND is 2K and 256M IND is 32K) we need to account +- * for an added LSB bit to the RPN. For 64K pages, there is no +- * problem as we already use 32K arrays (half PTE pages), but for +- * 4K page we need to extract a bit from the virtual address and +- * insert it into the "PA52" bit of the RPN. +- */ +- rlwimi r15,r16,32-9,20,20 +- /* Now we build the MAS: +- * +- * MAS 0 : Fully setup with defaults in MAS4 and TLBnCFG +- * MAS 1 : Almost fully setup +- * - PID already updated by caller if necessary +- * - TSIZE for now is base ind page size always +- * MAS 2 : Use defaults +- * MAS 3+7 : Needs to be done +- */ +- ori r10,r15,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT) +- +- srdi r16,r10,32 +- mtspr SPRN_MAS3,r10 +- mtspr SPRN_MAS7,r16 +- +- tlbwe +- +-htw_tlb_miss_done: +- /* We don't bother with restoring DEAR or ESR since we know we are +- * level 0 and just going back to userland. They are only needed +- * if you are going to take an access fault +- */ +- TLB_MISS_EPILOG_SUCCESS +- rfi +- +-htw_tlb_miss_fault: +- /* We need to check if it was an instruction miss. 
We know this +- * though because r14 would contain -1 +- */ +- cmpdi cr0,r14,-1 +- beq 1f +- mtspr SPRN_DEAR,r16 +- mtspr SPRN_ESR,r14 +- TLB_MISS_EPILOG_ERROR +- b exc_data_storage_book3e +-1: TLB_MISS_EPILOG_ERROR +- b exc_instruction_storage_book3e +- + /* + * This is the guts of "any" level TLB miss handler for kernel linear + * mapping misses. We are entered with: +diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig +index c785a02005738..fe30b24d52e18 100644 +--- a/arch/riscv/Kconfig ++++ b/arch/riscv/Kconfig +@@ -27,6 +27,7 @@ config RISCV + select ARCH_HAS_GCOV_PROFILE_ALL + select ARCH_HAS_GIGANTIC_PAGE + select ARCH_HAS_KCOV ++ select ARCH_HAS_MEMBARRIER_CALLBACKS + select ARCH_HAS_MMIOWB + select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE + select ARCH_HAS_PMEM_API +@@ -489,8 +490,8 @@ config RISCV_ISA_SVPBMT + config TOOLCHAIN_HAS_V + bool + default y +- depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64iv) +- depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32iv) ++ depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64imv) ++ depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32imv) + depends on LLD_VERSION >= 140000 || LD_VERSION >= 23800 + depends on AS_HAS_OPTION_ARCH + +diff --git a/arch/riscv/include/asm/kfence.h b/arch/riscv/include/asm/kfence.h +index 0bbffd528096d..7388edd88986f 100644 +--- a/arch/riscv/include/asm/kfence.h ++++ b/arch/riscv/include/asm/kfence.h +@@ -18,9 +18,9 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect) + pte_t *pte = virt_to_kpte(addr); + + if (protect) +- set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT)); ++ set_pte(pte, __pte(pte_val(ptep_get(pte)) & ~_PAGE_PRESENT)); + else +- set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT)); ++ set_pte(pte, __pte(pte_val(ptep_get(pte)) | _PAGE_PRESENT)); + + flush_tlb_kernel_range(addr, addr + PAGE_SIZE); + +diff --git a/arch/riscv/include/asm/membarrier.h b/arch/riscv/include/asm/membarrier.h +new file mode 100644 +index 0000000000000..6c016ebb5020a +--- /dev/null ++++ b/arch/riscv/include/asm/membarrier.h +@@ -0,0 +1,31 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++#ifndef _ASM_RISCV_MEMBARRIER_H ++#define _ASM_RISCV_MEMBARRIER_H ++ ++static inline void membarrier_arch_switch_mm(struct mm_struct *prev, ++ struct mm_struct *next, ++ struct task_struct *tsk) ++{ ++ /* ++ * Only need the full barrier when switching between processes. ++ * Barrier when switching from kernel to userspace is not ++ * required here, given that it is implied by mmdrop(). Barrier ++ * when switching from userspace to kernel is not needed after ++ * store to rq->curr. ++ */ ++ if (IS_ENABLED(CONFIG_SMP) && ++ likely(!(atomic_read(&next->membarrier_state) & ++ (MEMBARRIER_STATE_PRIVATE_EXPEDITED | ++ MEMBARRIER_STATE_GLOBAL_EXPEDITED)) || !prev)) ++ return; ++ ++ /* ++ * The membarrier system call requires a full memory barrier ++ * after storing to rq->curr, before going back to user-space. ++ * Matches a full barrier in the proximity of the membarrier ++ * system call entry. 
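[Editor's sketch: the early return above encodes the membarrier fast path, i.e. the full barrier is only required when switching to a task whose mm registered for expedited membarrier. On UP the smp_mb() degrades to a compiler barrier, so this hypothetical predicate only models the SMP case; the state bits are the ones used above.]

	/* True when switch_mm() must issue a full barrier for @next. */
	static bool membarrier_needs_smp_mb(struct mm_struct *prev,
					    struct mm_struct *next)
	{
		if (!IS_ENABLED(CONFIG_SMP))
			return false;
		return prev && (atomic_read(&next->membarrier_state) &
				(MEMBARRIER_STATE_PRIVATE_EXPEDITED |
				 MEMBARRIER_STATE_GLOBAL_EXPEDITED));
	}
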
++ */ ++ smp_mb(); ++} ++ ++#endif /* _ASM_RISCV_MEMBARRIER_H */ +diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h +index 7a5097202e157..3272ca7a5270b 100644 +--- a/arch/riscv/include/asm/pgtable-64.h ++++ b/arch/riscv/include/asm/pgtable-64.h +@@ -198,7 +198,7 @@ static inline int pud_user(pud_t pud) + + static inline void set_pud(pud_t *pudp, pud_t pud) + { +- *pudp = pud; ++ WRITE_ONCE(*pudp, pud); + } + + static inline void pud_clear(pud_t *pudp) +@@ -274,7 +274,7 @@ static inline unsigned long _pmd_pfn(pmd_t pmd) + static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) + { + if (pgtable_l4_enabled) +- *p4dp = p4d; ++ WRITE_ONCE(*p4dp, p4d); + else + set_pud((pud_t *)p4dp, (pud_t){ p4d_val(p4d) }); + } +@@ -336,18 +336,12 @@ static inline struct page *p4d_page(p4d_t p4d) + #define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)) + + #define pud_offset pud_offset +-static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) +-{ +- if (pgtable_l4_enabled) +- return p4d_pgtable(*p4d) + pud_index(address); +- +- return (pud_t *)p4d; +-} ++pud_t *pud_offset(p4d_t *p4d, unsigned long address); + + static inline void set_pgd(pgd_t *pgdp, pgd_t pgd) + { + if (pgtable_l5_enabled) +- *pgdp = pgd; ++ WRITE_ONCE(*pgdp, pgd); + else + set_p4d((p4d_t *)pgdp, (p4d_t){ pgd_val(pgd) }); + } +@@ -400,12 +394,6 @@ static inline struct page *pgd_page(pgd_t pgd) + #define p4d_index(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1)) + + #define p4d_offset p4d_offset +-static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) +-{ +- if (pgtable_l5_enabled) +- return pgd_pgtable(*pgd) + p4d_index(address); +- +- return (p4d_t *)pgd; +-} ++p4d_t *p4d_offset(pgd_t *pgd, unsigned long address); + + #endif /* _ASM_RISCV_PGTABLE_64_H */ +diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h +index 719c3041ae1c2..37829dab4a0a4 100644 +--- a/arch/riscv/include/asm/pgtable.h ++++ b/arch/riscv/include/asm/pgtable.h +@@ -248,7 +248,7 @@ static inline int pmd_leaf(pmd_t pmd) + + static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) + { +- *pmdp = pmd; ++ WRITE_ONCE(*pmdp, pmd); + } + + static inline void pmd_clear(pmd_t *pmdp) +@@ -515,7 +515,7 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b) + */ + static inline void set_pte(pte_t *ptep, pte_t pteval) + { +- *ptep = pteval; ++ WRITE_ONCE(*ptep, pteval); + } + + void flush_icache_pte(pte_t pte); +@@ -549,19 +549,12 @@ static inline void pte_clear(struct mm_struct *mm, + __set_pte_at(ptep, __pte(0)); + } + +-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS +-static inline int ptep_set_access_flags(struct vm_area_struct *vma, +- unsigned long address, pte_t *ptep, +- pte_t entry, int dirty) +-{ +- if (!pte_same(*ptep, entry)) +- __set_pte_at(ptep, entry); +- /* +- * update_mmu_cache will unconditionally execute, handling both +- * the case that the PTE changed and the spurious fault case. 
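[Editor's sketch: these riscv hunks follow one pattern throughout the series. Every page-table read becomes ptep_get()/pXdp_get() (a READ_ONCE) and every write goes through set_pte()/set_pXd() (a WRITE_ONCE), so entries are read exactly once and never torn. A minimal pair modeled on the set_pte()/ptep_get() definitions above; the names here are illustrative only.]

	/* Single-copy-atomic page-table accessors: one read, no tearing. */
	static inline pte_t pt_entry_read(pte_t *ptep)
	{
		return READ_ONCE(*ptep);
	}

	static inline void pt_entry_write(pte_t *ptep, pte_t pteval)
	{
		WRITE_ONCE(*ptep, pteval);
	}
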
+- */ +- return true; +-} ++#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS /* defined in mm/pgtable.c */ ++extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, ++ pte_t *ptep, pte_t entry, int dirty); ++#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG /* defined in mm/pgtable.c */ ++extern int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long address, ++ pte_t *ptep); + + #define __HAVE_ARCH_PTEP_GET_AND_CLEAR + static inline pte_t ptep_get_and_clear(struct mm_struct *mm, +@@ -574,16 +567,6 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, + return pte; + } + +-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG +-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, +- unsigned long address, +- pte_t *ptep) +-{ +- if (!pte_young(*ptep)) +- return 0; +- return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep)); +-} +- + #define __HAVE_ARCH_PTEP_SET_WRPROTECT + static inline void ptep_set_wrprotect(struct mm_struct *mm, + unsigned long address, pte_t *ptep) +diff --git a/arch/riscv/kernel/efi.c b/arch/riscv/kernel/efi.c +index aa6209a74c83f..b64bf1624a052 100644 +--- a/arch/riscv/kernel/efi.c ++++ b/arch/riscv/kernel/efi.c +@@ -60,7 +60,7 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md) + static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data) + { + efi_memory_desc_t *md = data; +- pte_t pte = READ_ONCE(*ptep); ++ pte_t pte = ptep_get(ptep); + unsigned long val; + + if (md->attribute & EFI_MEMORY_RO) { +diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S +index 0097c145385f6..9691fa8f2faa7 100644 +--- a/arch/riscv/kernel/head.S ++++ b/arch/riscv/kernel/head.S +@@ -305,6 +305,9 @@ clear_bss_done: + #else + mv a0, a1 + #endif /* CONFIG_BUILTIN_DTB */ ++ /* Set trap vector to spin forever to help debug */ ++ la a3, .Lsecondary_park ++ csrw CSR_TVEC, a3 + call setup_vm + #ifdef CONFIG_MMU + la a0, early_pg_dir +diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c +index 2f08c14a933d0..fecbbcf40ac3f 100644 +--- a/arch/riscv/kernel/probes/kprobes.c ++++ b/arch/riscv/kernel/probes/kprobes.c +@@ -28,9 +28,8 @@ static void __kprobes arch_prepare_ss_slot(struct kprobe *p) + + p->ainsn.api.restore = (unsigned long)p->addr + offset; + +- patch_text(p->ainsn.api.insn, &p->opcode, 1); +- patch_text((void *)((unsigned long)(p->ainsn.api.insn) + offset), +- &insn, 1); ++ patch_text_nosync(p->ainsn.api.insn, &p->opcode, 1); ++ patch_text_nosync(p->ainsn.api.insn + offset, &insn, 1); + } + + static void __kprobes arch_prepare_simulate(struct kprobe *p) +diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c +index 068c745938710..a9e2fd7245e1e 100644 +--- a/arch/riscv/kvm/mmu.c ++++ b/arch/riscv/kvm/mmu.c +@@ -103,7 +103,7 @@ static bool gstage_get_leaf_entry(struct kvm *kvm, gpa_t addr, + *ptep_level = current_level; + ptep = (pte_t *)kvm->arch.pgd; + ptep = &ptep[gstage_pte_index(addr, current_level)]; +- while (ptep && pte_val(*ptep)) { ++ while (ptep && pte_val(ptep_get(ptep))) { + if (gstage_pte_leaf(ptep)) { + *ptep_level = current_level; + *ptepp = ptep; +@@ -113,7 +113,7 @@ static bool gstage_get_leaf_entry(struct kvm *kvm, gpa_t addr, + if (current_level) { + current_level--; + *ptep_level = current_level; +- ptep = (pte_t *)gstage_pte_page_vaddr(*ptep); ++ ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep)); + ptep = &ptep[gstage_pte_index(addr, current_level)]; + } else { + ptep = NULL; +@@ -149,25 +149,25 @@ static int 
gstage_set_pte(struct kvm *kvm, u32 level, + if (gstage_pte_leaf(ptep)) + return -EEXIST; + +- if (!pte_val(*ptep)) { ++ if (!pte_val(ptep_get(ptep))) { + if (!pcache) + return -ENOMEM; + next_ptep = kvm_mmu_memory_cache_alloc(pcache); + if (!next_ptep) + return -ENOMEM; +- *ptep = pfn_pte(PFN_DOWN(__pa(next_ptep)), +- __pgprot(_PAGE_TABLE)); ++ set_pte(ptep, pfn_pte(PFN_DOWN(__pa(next_ptep)), ++ __pgprot(_PAGE_TABLE))); + } else { + if (gstage_pte_leaf(ptep)) + return -EEXIST; +- next_ptep = (pte_t *)gstage_pte_page_vaddr(*ptep); ++ next_ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep)); + } + + current_level--; + ptep = &next_ptep[gstage_pte_index(addr, current_level)]; + } + +- *ptep = *new_pte; ++ set_pte(ptep, *new_pte); + if (gstage_pte_leaf(ptep)) + gstage_remote_tlb_flush(kvm, current_level, addr); + +@@ -239,11 +239,11 @@ static void gstage_op_pte(struct kvm *kvm, gpa_t addr, + + BUG_ON(addr & (page_size - 1)); + +- if (!pte_val(*ptep)) ++ if (!pte_val(ptep_get(ptep))) + return; + + if (ptep_level && !gstage_pte_leaf(ptep)) { +- next_ptep = (pte_t *)gstage_pte_page_vaddr(*ptep); ++ next_ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep)); + next_ptep_level = ptep_level - 1; + ret = gstage_level_to_page_size(next_ptep_level, + &next_page_size); +@@ -261,7 +261,7 @@ static void gstage_op_pte(struct kvm *kvm, gpa_t addr, + if (op == GSTAGE_OP_CLEAR) + set_pte(ptep, __pte(0)); + else if (op == GSTAGE_OP_WP) +- set_pte(ptep, __pte(pte_val(*ptep) & ~_PAGE_WRITE)); ++ set_pte(ptep, __pte(pte_val(ptep_get(ptep)) & ~_PAGE_WRITE)); + gstage_remote_tlb_flush(kvm, ptep_level, addr); + } + } +@@ -603,7 +603,7 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) + &ptep, &ptep_level)) + return false; + +- return pte_young(*ptep); ++ return pte_young(ptep_get(ptep)); + } + + int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu, +diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile +index 3a4dfc8babcf8..2c869f8026a88 100644 +--- a/arch/riscv/mm/Makefile ++++ b/arch/riscv/mm/Makefile +@@ -13,10 +13,9 @@ endif + KCOV_INSTRUMENT_init.o := n + + obj-y += init.o +-obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o ++obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o pgtable.o + obj-y += cacheflush.o + obj-y += context.o +-obj-y += pgtable.o + obj-y += pmem.o + + ifeq ($(CONFIG_MMU),y) +diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c +index 217fd4de61342..ba8eb3944687c 100644 +--- a/arch/riscv/mm/context.c ++++ b/arch/riscv/mm/context.c +@@ -323,6 +323,8 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next, + if (unlikely(prev == next)) + return; + ++ membarrier_arch_switch_mm(prev, next, task); ++ + /* + * Mark the current MM context as inactive, and the next as + * active. 
This is at least used by the icache flushing +diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c +index 655b2b1bb529f..8960f4c844976 100644 +--- a/arch/riscv/mm/fault.c ++++ b/arch/riscv/mm/fault.c +@@ -137,24 +137,24 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a + pgd = (pgd_t *)pfn_to_virt(pfn) + index; + pgd_k = init_mm.pgd + index; + +- if (!pgd_present(*pgd_k)) { ++ if (!pgd_present(pgdp_get(pgd_k))) { + no_context(regs, addr); + return; + } +- set_pgd(pgd, *pgd_k); ++ set_pgd(pgd, pgdp_get(pgd_k)); + + p4d_k = p4d_offset(pgd_k, addr); +- if (!p4d_present(*p4d_k)) { ++ if (!p4d_present(p4dp_get(p4d_k))) { + no_context(regs, addr); + return; + } + + pud_k = pud_offset(p4d_k, addr); +- if (!pud_present(*pud_k)) { ++ if (!pud_present(pudp_get(pud_k))) { + no_context(regs, addr); + return; + } +- if (pud_leaf(*pud_k)) ++ if (pud_leaf(pudp_get(pud_k))) + goto flush_tlb; + + /* +@@ -162,11 +162,11 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a + * to copy individual PTEs + */ + pmd_k = pmd_offset(pud_k, addr); +- if (!pmd_present(*pmd_k)) { ++ if (!pmd_present(pmdp_get(pmd_k))) { + no_context(regs, addr); + return; + } +- if (pmd_leaf(*pmd_k)) ++ if (pmd_leaf(pmdp_get(pmd_k))) + goto flush_tlb; + + /* +@@ -176,7 +176,7 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a + * silently loop forever. + */ + pte_k = pte_offset_kernel(pmd_k, addr); +- if (!pte_present(*pte_k)) { ++ if (!pte_present(ptep_get(pte_k))) { + no_context(regs, addr); + return; + } +diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c +index fbe918801667d..5ef2a6891158a 100644 +--- a/arch/riscv/mm/hugetlbpage.c ++++ b/arch/riscv/mm/hugetlbpage.c +@@ -54,7 +54,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, + } + + if (sz == PMD_SIZE) { +- if (want_pmd_share(vma, addr) && pud_none(*pud)) ++ if (want_pmd_share(vma, addr) && pud_none(pudp_get(pud))) + pte = huge_pmd_share(mm, vma, addr, pud); + else + pte = (pte_t *)pmd_alloc(mm, pud, addr); +@@ -93,11 +93,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm, + pmd_t *pmd; + + pgd = pgd_offset(mm, addr); +- if (!pgd_present(*pgd)) ++ if (!pgd_present(pgdp_get(pgd))) + return NULL; + + p4d = p4d_offset(pgd, addr); +- if (!p4d_present(*p4d)) ++ if (!p4d_present(p4dp_get(p4d))) + return NULL; + + pud = pud_offset(p4d, addr); +@@ -105,7 +105,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm, + /* must be pud huge, non-present or none */ + return (pte_t *)pud; + +- if (!pud_present(*pud)) ++ if (!pud_present(pudp_get(pud))) + return NULL; + + pmd = pmd_offset(pud, addr); +@@ -113,7 +113,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm, + /* must be pmd huge, non-present or none */ + return (pte_t *)pmd; + +- if (!pmd_present(*pmd)) ++ if (!pmd_present(pmdp_get(pmd))) + return NULL; + + for_each_napot_order(order) { +@@ -351,7 +351,7 @@ void huge_pte_clear(struct mm_struct *mm, + pte_t *ptep, + unsigned long sz) + { +- pte_t pte = READ_ONCE(*ptep); ++ pte_t pte = ptep_get(ptep); + int i, pte_num; + + if (!pte_napot(pte)) { +diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c +index abe7a7a7686c1..3245bb525212e 100644 +--- a/arch/riscv/mm/init.c ++++ b/arch/riscv/mm/init.c +@@ -235,7 +235,7 @@ static void __init setup_bootmem(void) + * The size of the linear page mapping may restrict the amount of + * usable RAM. 
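[Editor's sketch: for the setup_bootmem() hunk just below, with CONFIG_MMU the linear map can address at most KERN_VIRT_SIZE bytes from PAGE_OFFSET, and RAM beyond that is trimmed. Same expressions as the code; the helper form is hypothetical.]

	/* Trim RAM that the kernel's linear mapping can never reach. */
	static void cap_to_linear_map(phys_addr_t phys_ram_base)
	{
		phys_addr_t max_mapped = __pa(PAGE_OFFSET) + KERN_VIRT_SIZE;

		memblock_cap_memory_range(phys_ram_base,
					  max_mapped - phys_ram_base);
	}
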
+ */ +- if (IS_ENABLED(CONFIG_64BIT)) { ++ if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU)) { + max_mapped_addr = __pa(PAGE_OFFSET) + KERN_VIRT_SIZE; + memblock_cap_memory_range(phys_ram_base, + max_mapped_addr - phys_ram_base); +diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c +index 5e39dcf23fdbc..e962518530373 100644 +--- a/arch/riscv/mm/kasan_init.c ++++ b/arch/riscv/mm/kasan_init.c +@@ -31,7 +31,7 @@ static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned + phys_addr_t phys_addr; + pte_t *ptep, *p; + +- if (pmd_none(*pmd)) { ++ if (pmd_none(pmdp_get(pmd))) { + p = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE); + set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(p)), PAGE_TABLE)); + } +@@ -39,7 +39,7 @@ static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned + ptep = pte_offset_kernel(pmd, vaddr); + + do { +- if (pte_none(*ptep)) { ++ if (pte_none(ptep_get(ptep))) { + phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE); + set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL)); + memset(__va(phys_addr), KASAN_SHADOW_INIT, PAGE_SIZE); +@@ -53,7 +53,7 @@ static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned + pmd_t *pmdp, *p; + unsigned long next; + +- if (pud_none(*pud)) { ++ if (pud_none(pudp_get(pud))) { + p = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE); + set_pud(pud, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE)); + } +@@ -63,7 +63,8 @@ static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned + do { + next = pmd_addr_end(vaddr, end); + +- if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) { ++ if (pmd_none(pmdp_get(pmdp)) && IS_ALIGNED(vaddr, PMD_SIZE) && ++ (next - vaddr) >= PMD_SIZE) { + phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE); + if (phys_addr) { + set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL)); +@@ -83,7 +84,7 @@ static void __init kasan_populate_pud(p4d_t *p4d, + pud_t *pudp, *p; + unsigned long next; + +- if (p4d_none(*p4d)) { ++ if (p4d_none(p4dp_get(p4d))) { + p = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE); + set_p4d(p4d, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE)); + } +@@ -93,7 +94,8 @@ static void __init kasan_populate_pud(p4d_t *p4d, + do { + next = pud_addr_end(vaddr, end); + +- if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) { ++ if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) && ++ (next - vaddr) >= PUD_SIZE) { + phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE); + if (phys_addr) { + set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL)); +@@ -113,7 +115,7 @@ static void __init kasan_populate_p4d(pgd_t *pgd, + p4d_t *p4dp, *p; + unsigned long next; + +- if (pgd_none(*pgd)) { ++ if (pgd_none(pgdp_get(pgd))) { + p = memblock_alloc(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE); + set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE)); + } +@@ -123,7 +125,8 @@ static void __init kasan_populate_p4d(pgd_t *pgd, + do { + next = p4d_addr_end(vaddr, end); + +- if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE) { ++ if (p4d_none(p4dp_get(p4dp)) && IS_ALIGNED(vaddr, P4D_SIZE) && ++ (next - vaddr) >= P4D_SIZE) { + phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE); + if (phys_addr) { + set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL)); +@@ -145,7 +148,7 @@ static void __init kasan_populate_pgd(pgd_t *pgdp, + do { + next = pgd_addr_end(vaddr, end); + +- if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) && ++ if 
(pgd_none(pgdp_get(pgdp)) && IS_ALIGNED(vaddr, PGDIR_SIZE) && + (next - vaddr) >= PGDIR_SIZE) { + phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE); + if (phys_addr) { +@@ -168,7 +171,7 @@ static void __init kasan_early_clear_pud(p4d_t *p4dp, + if (!pgtable_l4_enabled) { + pudp = (pud_t *)p4dp; + } else { +- base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp))); ++ base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(p4dp_get(p4dp)))); + pudp = base_pud + pud_index(vaddr); + } + +@@ -193,7 +196,7 @@ static void __init kasan_early_clear_p4d(pgd_t *pgdp, + if (!pgtable_l5_enabled) { + p4dp = (p4d_t *)pgdp; + } else { +- base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp))); ++ base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(pgdp_get(pgdp)))); + p4dp = base_p4d + p4d_index(vaddr); + } + +@@ -239,14 +242,14 @@ static void __init kasan_early_populate_pud(p4d_t *p4dp, + if (!pgtable_l4_enabled) { + pudp = (pud_t *)p4dp; + } else { +- base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp))); ++ base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(p4dp_get(p4dp)))); + pudp = base_pud + pud_index(vaddr); + } + + do { + next = pud_addr_end(vaddr, end); + +- if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && ++ if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) && + (next - vaddr) >= PUD_SIZE) { + phys_addr = __pa((uintptr_t)kasan_early_shadow_pmd); + set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE)); +@@ -277,14 +280,14 @@ static void __init kasan_early_populate_p4d(pgd_t *pgdp, + if (!pgtable_l5_enabled) { + p4dp = (p4d_t *)pgdp; + } else { +- base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp))); ++ base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(pgdp_get(pgdp)))); + p4dp = base_p4d + p4d_index(vaddr); + } + + do { + next = p4d_addr_end(vaddr, end); + +- if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) && ++ if (p4d_none(p4dp_get(p4dp)) && IS_ALIGNED(vaddr, P4D_SIZE) && + (next - vaddr) >= P4D_SIZE) { + phys_addr = __pa((uintptr_t)kasan_early_shadow_pud); + set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE)); +@@ -305,7 +308,7 @@ static void __init kasan_early_populate_pgd(pgd_t *pgdp, + do { + next = pgd_addr_end(vaddr, end); + +- if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) && ++ if (pgd_none(pgdp_get(pgdp)) && IS_ALIGNED(vaddr, PGDIR_SIZE) && + (next - vaddr) >= PGDIR_SIZE) { + phys_addr = __pa((uintptr_t)kasan_early_shadow_p4d); + set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE)); +@@ -381,7 +384,7 @@ static void __init kasan_shallow_populate_pud(p4d_t *p4d, + do { + next = pud_addr_end(vaddr, end); + +- if (pud_none(*pud_k)) { ++ if (pud_none(pudp_get(pud_k))) { + p = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + set_pud(pud_k, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE)); + continue; +@@ -401,7 +404,7 @@ static void __init kasan_shallow_populate_p4d(pgd_t *pgd, + do { + next = p4d_addr_end(vaddr, end); + +- if (p4d_none(*p4d_k)) { ++ if (p4d_none(p4dp_get(p4d_k))) { + p = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + set_p4d(p4d_k, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE)); + continue; +@@ -420,7 +423,7 @@ static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long + do { + next = pgd_addr_end(vaddr, end); + +- if (pgd_none(*pgd_k)) { ++ if (pgd_none(pgdp_get(pgd_k))) { + p = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE)); + continue; +@@ -451,7 +454,7 @@ static void __init create_tmp_mapping(void) + + /* Copy the last p4d since it is shared with the kernel mapping. 
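[Editor's sketch: the guard repeated throughout these kasan helpers is identical at every level — populate a large leaf only when the slot is empty and the range covers a whole naturally aligned block. Generic form shown at the PGDIR level, as in the hunk above; the helper name is illustrative.]

	/* Can [vaddr, next) be backed by one whole PGDIR-sized leaf? */
	static bool can_use_block(unsigned long vaddr, unsigned long next,
				  pgd_t *pgdp)
	{
		return pgd_none(pgdp_get(pgdp)) &&
		       IS_ALIGNED(vaddr, PGDIR_SIZE) &&
		       (next - vaddr) >= PGDIR_SIZE;
	}
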
*/ + if (pgtable_l5_enabled) { +- ptr = (p4d_t *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END)); ++ ptr = (p4d_t *)pgd_page_vaddr(pgdp_get(pgd_offset_k(KASAN_SHADOW_END))); + memcpy(tmp_p4d, ptr, sizeof(p4d_t) * PTRS_PER_P4D); + set_pgd(&tmp_pg_dir[pgd_index(KASAN_SHADOW_END)], + pfn_pgd(PFN_DOWN(__pa(tmp_p4d)), PAGE_TABLE)); +@@ -462,7 +465,7 @@ static void __init create_tmp_mapping(void) + + /* Copy the last pud since it is shared with the kernel mapping. */ + if (pgtable_l4_enabled) { +- ptr = (pud_t *)p4d_page_vaddr(*(base_p4d + p4d_index(KASAN_SHADOW_END))); ++ ptr = (pud_t *)p4d_page_vaddr(p4dp_get(base_p4d + p4d_index(KASAN_SHADOW_END))); + memcpy(tmp_pud, ptr, sizeof(pud_t) * PTRS_PER_PUD); + set_p4d(&base_p4d[p4d_index(KASAN_SHADOW_END)], + pfn_p4d(PFN_DOWN(__pa(tmp_pud)), PAGE_TABLE)); +diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c +index f61b2f8291e35..271d01a5ba4da 100644 +--- a/arch/riscv/mm/pageattr.c ++++ b/arch/riscv/mm/pageattr.c +@@ -29,7 +29,7 @@ static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk) + static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr, + unsigned long next, struct mm_walk *walk) + { +- p4d_t val = READ_ONCE(*p4d); ++ p4d_t val = p4dp_get(p4d); + + if (p4d_leaf(val)) { + val = __p4d(set_pageattr_masks(p4d_val(val), walk)); +@@ -42,7 +42,7 @@ static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr, + static int pageattr_pud_entry(pud_t *pud, unsigned long addr, + unsigned long next, struct mm_walk *walk) + { +- pud_t val = READ_ONCE(*pud); ++ pud_t val = pudp_get(pud); + + if (pud_leaf(val)) { + val = __pud(set_pageattr_masks(pud_val(val), walk)); +@@ -55,7 +55,7 @@ static int pageattr_pud_entry(pud_t *pud, unsigned long addr, + static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr, + unsigned long next, struct mm_walk *walk) + { +- pmd_t val = READ_ONCE(*pmd); ++ pmd_t val = pmdp_get(pmd); + + if (pmd_leaf(val)) { + val = __pmd(set_pageattr_masks(pmd_val(val), walk)); +@@ -68,7 +68,7 @@ static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr, + static int pageattr_pte_entry(pte_t *pte, unsigned long addr, + unsigned long next, struct mm_walk *walk) + { +- pte_t val = READ_ONCE(*pte); ++ pte_t val = ptep_get(pte); + + val = __pte(set_pageattr_masks(pte_val(val), walk)); + set_pte(pte, val); +@@ -108,10 +108,10 @@ static int __split_linear_mapping_pmd(pud_t *pudp, + vaddr <= (vaddr & PMD_MASK) && end >= next) + continue; + +- if (pmd_leaf(*pmdp)) { ++ if (pmd_leaf(pmdp_get(pmdp))) { + struct page *pte_page; +- unsigned long pfn = _pmd_pfn(*pmdp); +- pgprot_t prot = __pgprot(pmd_val(*pmdp) & ~_PAGE_PFN_MASK); ++ unsigned long pfn = _pmd_pfn(pmdp_get(pmdp)); ++ pgprot_t prot = __pgprot(pmd_val(pmdp_get(pmdp)) & ~_PAGE_PFN_MASK); + pte_t *ptep_new; + int i; + +@@ -148,10 +148,10 @@ static int __split_linear_mapping_pud(p4d_t *p4dp, + vaddr <= (vaddr & PUD_MASK) && end >= next) + continue; + +- if (pud_leaf(*pudp)) { ++ if (pud_leaf(pudp_get(pudp))) { + struct page *pmd_page; +- unsigned long pfn = _pud_pfn(*pudp); +- pgprot_t prot = __pgprot(pud_val(*pudp) & ~_PAGE_PFN_MASK); ++ unsigned long pfn = _pud_pfn(pudp_get(pudp)); ++ pgprot_t prot = __pgprot(pud_val(pudp_get(pudp)) & ~_PAGE_PFN_MASK); + pmd_t *pmdp_new; + int i; + +@@ -197,10 +197,10 @@ static int __split_linear_mapping_p4d(pgd_t *pgdp, + vaddr <= (vaddr & P4D_MASK) && end >= next) + continue; + +- if (p4d_leaf(*p4dp)) { ++ if (p4d_leaf(p4dp_get(p4dp))) { + struct page *pud_page; +- unsigned long pfn = _p4d_pfn(*p4dp); +- 
pgprot_t prot = __pgprot(p4d_val(*p4dp) & ~_PAGE_PFN_MASK); ++ unsigned long pfn = _p4d_pfn(p4dp_get(p4dp)); ++ pgprot_t prot = __pgprot(p4d_val(p4dp_get(p4dp)) & ~_PAGE_PFN_MASK); + pud_t *pudp_new; + int i; + +@@ -427,29 +427,29 @@ bool kernel_page_present(struct page *page) + pte_t *pte; + + pgd = pgd_offset_k(addr); +- if (!pgd_present(*pgd)) ++ if (!pgd_present(pgdp_get(pgd))) + return false; +- if (pgd_leaf(*pgd)) ++ if (pgd_leaf(pgdp_get(pgd))) + return true; + + p4d = p4d_offset(pgd, addr); +- if (!p4d_present(*p4d)) ++ if (!p4d_present(p4dp_get(p4d))) + return false; +- if (p4d_leaf(*p4d)) ++ if (p4d_leaf(p4dp_get(p4d))) + return true; + + pud = pud_offset(p4d, addr); +- if (!pud_present(*pud)) ++ if (!pud_present(pudp_get(pud))) + return false; +- if (pud_leaf(*pud)) ++ if (pud_leaf(pudp_get(pud))) + return true; + + pmd = pmd_offset(pud, addr); +- if (!pmd_present(*pmd)) ++ if (!pmd_present(pmdp_get(pmd))) + return false; +- if (pmd_leaf(*pmd)) ++ if (pmd_leaf(pmdp_get(pmd))) + return true; + + pte = pte_offset_kernel(pmd, addr); +- return pte_present(*pte); ++ return pte_present(ptep_get(pte)); + } +diff --git a/arch/riscv/mm/pgtable.c b/arch/riscv/mm/pgtable.c +index fef4e7328e490..ef887efcb6790 100644 +--- a/arch/riscv/mm/pgtable.c ++++ b/arch/riscv/mm/pgtable.c +@@ -5,6 +5,47 @@ + #include + #include + ++int ptep_set_access_flags(struct vm_area_struct *vma, ++ unsigned long address, pte_t *ptep, ++ pte_t entry, int dirty) ++{ ++ if (!pte_same(ptep_get(ptep), entry)) ++ __set_pte_at(ptep, entry); ++ /* ++ * update_mmu_cache will unconditionally execute, handling both ++ * the case that the PTE changed and the spurious fault case. ++ */ ++ return true; ++} ++ ++int ptep_test_and_clear_young(struct vm_area_struct *vma, ++ unsigned long address, ++ pte_t *ptep) ++{ ++ if (!pte_young(ptep_get(ptep))) ++ return 0; ++ return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep)); ++} ++EXPORT_SYMBOL_GPL(ptep_test_and_clear_young); ++ ++#ifdef CONFIG_64BIT ++pud_t *pud_offset(p4d_t *p4d, unsigned long address) ++{ ++ if (pgtable_l4_enabled) ++ return p4d_pgtable(p4dp_get(p4d)) + pud_index(address); ++ ++ return (pud_t *)p4d; ++} ++ ++p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) ++{ ++ if (pgtable_l5_enabled) ++ return pgd_pgtable(pgdp_get(pgd)) + p4d_index(address); ++ ++ return (p4d_t *)pgd; ++} ++#endif ++ + #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP + int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot) + { +@@ -25,7 +66,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot) + + int pud_clear_huge(pud_t *pud) + { +- if (!pud_leaf(READ_ONCE(*pud))) ++ if (!pud_leaf(pudp_get(pud))) + return 0; + pud_clear(pud); + return 1; +@@ -33,7 +74,7 @@ int pud_clear_huge(pud_t *pud) + + int pud_free_pmd_page(pud_t *pud, unsigned long addr) + { +- pmd_t *pmd = pud_pgtable(*pud); ++ pmd_t *pmd = pud_pgtable(pudp_get(pud)); + int i; + + pud_clear(pud); +@@ -63,7 +104,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot) + + int pmd_clear_huge(pmd_t *pmd) + { +- if (!pmd_leaf(READ_ONCE(*pmd))) ++ if (!pmd_leaf(pmdp_get(pmd))) + return 0; + pmd_clear(pmd); + return 1; +@@ -71,7 +112,7 @@ int pmd_clear_huge(pmd_t *pmd) + + int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) + { +- pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd); ++ pte_t *pte = (pte_t *)pmd_page_vaddr(pmdp_get(pmd)); + + pmd_clear(pmd); + +@@ -88,7 +129,7 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, + pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); + + VM_BUG_ON(address 
& ~HPAGE_PMD_MASK); +- VM_BUG_ON(pmd_trans_huge(*pmdp)); ++ VM_BUG_ON(pmd_trans_huge(pmdp_get(pmdp))); + /* + * When leaf PTE entries (regular pages) are collapsed into a leaf + * PMD entry (huge page), a valid non-leaf PTE is converted into a +diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S +index 2ae201ebf90b9..de5f9f623f5b2 100644 +--- a/arch/s390/kernel/vmlinux.lds.S ++++ b/arch/s390/kernel/vmlinux.lds.S +@@ -71,6 +71,15 @@ SECTIONS + . = ALIGN(PAGE_SIZE); + __end_ro_after_init = .; + ++ .data.rel.ro : { ++ *(.data.rel.ro .data.rel.ro.*) ++ } ++ .got : { ++ __got_start = .; ++ *(.got) ++ __got_end = .; ++ } ++ + RW_DATA(0x100, PAGE_SIZE, THREAD_SIZE) + BOOT_DATA_PRESERVED + +diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c +index 375200e9aba9a..2ba4e0d4e26b0 100644 +--- a/arch/um/drivers/line.c ++++ b/arch/um/drivers/line.c +@@ -383,6 +383,7 @@ int setup_one_line(struct line *lines, int n, char *init, + parse_chan_pair(NULL, line, n, opts, error_out); + err = 0; + } ++ *error_out = "configured as 'none'"; + } else { + char *new = kstrdup(init, GFP_KERNEL); + if (!new) { +@@ -406,6 +407,7 @@ int setup_one_line(struct line *lines, int n, char *init, + } + } + if (err) { ++ *error_out = "failed to parse channel pair"; + line->init_str = NULL; + line->valid = 0; + kfree(new); +diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c +index f3c75809fed26..006041fbb65f8 100644 +--- a/arch/x86/coco/tdx/tdx.c ++++ b/arch/x86/coco/tdx/tdx.c +@@ -362,7 +362,6 @@ static bool mmio_read(int size, unsigned long addr, unsigned long *val) + .r12 = size, + .r13 = EPT_READ, + .r14 = addr, +- .r15 = *val, + }; + + if (__tdx_hypercall_ret(&args)) +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c +index bc4fcf0d94056..688550e336ce1 100644 +--- a/arch/x86/events/intel/core.c ++++ b/arch/x86/events/intel/core.c +@@ -4465,6 +4465,25 @@ static u8 adl_get_hybrid_cpu_type(void) + return hybrid_big; + } + ++static inline bool erratum_hsw11(struct perf_event *event) ++{ ++ return (event->hw.config & INTEL_ARCH_EVENT_MASK) == ++ X86_CONFIG(.event=0xc0, .umask=0x01); ++} ++ ++/* ++ * The HSW11 requires a period larger than 100 which is the same as the BDM11. ++ * A minimum period of 128 is enforced as well for the INST_RETIRED.ALL. ++ * ++ * The message 'interrupt took too long' can be observed on any counter which ++ * was armed with a period < 32 and two events expired in the same NMI. ++ * A minimum period of 32 is enforced for the rest of the events. ++ */ ++static void hsw_limit_period(struct perf_event *event, s64 *left) ++{ ++ *left = max(*left, erratum_hsw11(event) ? 128 : 32); ++} ++ + /* + * Broadwell: + * +@@ -4482,8 +4501,7 @@ static u8 adl_get_hybrid_cpu_type(void) + */ + static void bdw_limit_period(struct perf_event *event, s64 *left) + { +- if ((event->hw.config & INTEL_ARCH_EVENT_MASK) == +- X86_CONFIG(.event=0xc0, .umask=0x01)) { ++ if (erratum_hsw11(event)) { + if (*left < 128) + *left = 128; + *left &= ~0x3fULL; +@@ -6392,6 +6410,7 @@ __init int intel_pmu_init(void) + + x86_pmu.hw_config = hsw_hw_config; + x86_pmu.get_event_constraints = hsw_get_event_constraints; ++ x86_pmu.limit_period = hsw_limit_period; + x86_pmu.lbr_double_abort = true; + extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 
+ hsw_format_attr : nhm_format_attr; +diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h +index eb810074f1e74..fd5fb43d920b4 100644 +--- a/arch/x86/include/asm/fpu/types.h ++++ b/arch/x86/include/asm/fpu/types.h +@@ -589,6 +589,13 @@ struct fpu_state_config { + * even without XSAVE support, i.e. legacy features FP + SSE + */ + u64 legacy_features; ++ /* ++ * @independent_features: ++ * ++ * Features that are supported by XSAVES, but not managed as part of ++ * the FPU core, such as LBR ++ */ ++ u64 independent_features; + }; + + /* FPU state configuration information */ +diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h +index cc6b8e087192e..9dab85aba7afd 100644 +--- a/arch/x86/include/asm/page_64.h ++++ b/arch/x86/include/asm/page_64.h +@@ -17,6 +17,7 @@ extern unsigned long phys_base; + extern unsigned long page_offset_base; + extern unsigned long vmalloc_base; + extern unsigned long vmemmap_base; ++extern unsigned long physmem_end; + + static __always_inline unsigned long __phys_addr_nodebug(unsigned long x) + { +diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h +index 38b54b992f32e..35c416f061552 100644 +--- a/arch/x86/include/asm/pgtable_64_types.h ++++ b/arch/x86/include/asm/pgtable_64_types.h +@@ -140,6 +140,10 @@ extern unsigned int ptrs_per_p4d; + # define VMEMMAP_START __VMEMMAP_BASE_L4 + #endif /* CONFIG_DYNAMIC_MEMORY_LAYOUT */ + ++#ifdef CONFIG_RANDOMIZE_MEMORY ++# define PHYSMEM_END physmem_end ++#endif ++ + /* + * End of the region for which vmalloc page tables are pre-allocated. + * For non-KMSAN builds, this is the same as VMALLOC_END. +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c +index 06d5a7eeee81a..f2e605ee75d96 100644 +--- a/arch/x86/kernel/apic/apic.c ++++ b/arch/x86/kernel/apic/apic.c +@@ -1812,12 +1812,9 @@ static __init void apic_set_fixmap(bool read_apic); + + static __init void x2apic_disable(void) + { +- u32 x2apic_id, state = x2apic_state; ++ u32 x2apic_id; + +- x2apic_mode = 0; +- x2apic_state = X2APIC_DISABLED; +- +- if (state != X2APIC_ON) ++ if (x2apic_state < X2APIC_ON) + return; + + x2apic_id = read_apic_id(); +@@ -1830,6 +1827,10 @@ static __init void x2apic_disable(void) + } + + __x2apic_disable(); ++ ++ x2apic_mode = 0; ++ x2apic_state = X2APIC_DISABLED; ++ + /* + * Don't reread the APIC ID as it was already done from + * check_x2apic() and the APIC driver still is a x2APIC variant, +diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c +index 571a43b3105df..255ff8f6c5270 100644 +--- a/arch/x86/kernel/fpu/xstate.c ++++ b/arch/x86/kernel/fpu/xstate.c +@@ -788,6 +788,9 @@ void __init fpu__init_system_xstate(unsigned int legacy_size) + goto out_disable; + } + ++ fpu_kernel_cfg.independent_features = fpu_kernel_cfg.max_features & ++ XFEATURE_MASK_INDEPENDENT; ++ + /* + * Clear XSAVE features that are disabled in the normal CPUID. 
+ */ +diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h +index 19ca623ffa2ac..544224611e23c 100644 +--- a/arch/x86/kernel/fpu/xstate.h ++++ b/arch/x86/kernel/fpu/xstate.h +@@ -64,9 +64,9 @@ static inline u64 xfeatures_mask_supervisor(void) + static inline u64 xfeatures_mask_independent(void) + { + if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR)) +- return XFEATURE_MASK_INDEPENDENT & ~XFEATURE_MASK_LBR; ++ return fpu_kernel_cfg.independent_features & ~XFEATURE_MASK_LBR; + +- return XFEATURE_MASK_INDEPENDENT; ++ return fpu_kernel_cfg.independent_features; + } + + /* XSAVE/XRSTOR wrapper functions */ +diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c +index e3c2acc1adc73..413f1f2aadd1a 100644 +--- a/arch/x86/kvm/svm/svm.c ++++ b/arch/x86/kvm/svm/svm.c +@@ -2869,6 +2869,12 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) + case MSR_CSTAR: + msr_info->data = svm->vmcb01.ptr->save.cstar; + break; ++ case MSR_GS_BASE: ++ msr_info->data = svm->vmcb01.ptr->save.gs.base; ++ break; ++ case MSR_FS_BASE: ++ msr_info->data = svm->vmcb01.ptr->save.fs.base; ++ break; + case MSR_KERNEL_GS_BASE: + msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base; + break; +@@ -3090,6 +3096,12 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) + case MSR_CSTAR: + svm->vmcb01.ptr->save.cstar = data; + break; ++ case MSR_GS_BASE: ++ svm->vmcb01.ptr->save.gs.base = data; ++ break; ++ case MSR_FS_BASE: ++ svm->vmcb01.ptr->save.fs.base = data; ++ break; + case MSR_KERNEL_GS_BASE: + svm->vmcb01.ptr->save.kernel_gs_base = data; + break; +@@ -5166,6 +5178,9 @@ static __init void svm_set_cpu_caps(void) + + /* CPUID 0x8000001F (SME/SEV features) */ + sev_set_cpu_caps(); ++ ++ /* Don't advertise Bus Lock Detect to guest if SVM support is absent */ ++ kvm_cpu_cap_clear(X86_FEATURE_BUS_LOCK_DETECT); + } + + static __init int svm_hardware_setup(void) +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index c7e7ab1593d5b..50cc822e12900 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -5829,7 +5829,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, + if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events))) + break; + ++ kvm_vcpu_srcu_read_lock(vcpu); + r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events); ++ kvm_vcpu_srcu_read_unlock(vcpu); + break; + } + case KVM_GET_DEBUGREGS: { +diff --git a/arch/x86/lib/iomem.c b/arch/x86/lib/iomem.c +index e0411a3774d49..5eecb45d05d5d 100644 +--- a/arch/x86/lib/iomem.c ++++ b/arch/x86/lib/iomem.c +@@ -25,6 +25,9 @@ static __always_inline void rep_movs(void *to, const void *from, size_t n) + + static void string_memcpy_fromio(void *to, const volatile void __iomem *from, size_t n) + { ++ const void *orig_to = to; ++ const size_t orig_n = n; ++ + if (unlikely(!n)) + return; + +@@ -39,7 +42,7 @@ static void string_memcpy_fromio(void *to, const volatile void __iomem *from, si + } + rep_movs(to, (const void *)from, n); + /* KMSAN must treat values read from devices as initialized. 
*/ +- kmsan_unpoison_memory(to, n); ++ kmsan_unpoison_memory(orig_to, orig_n); + } + + static void string_memcpy_toio(volatile void __iomem *to, const void *from, size_t n) +diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c +index 19d209b412d7a..aa69353da49f2 100644 +--- a/arch/x86/mm/init_64.c ++++ b/arch/x86/mm/init_64.c +@@ -950,8 +950,12 @@ static void update_end_of_memory_vars(u64 start, u64 size) + int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, + struct mhp_params *params) + { ++ unsigned long end = ((start_pfn + nr_pages) << PAGE_SHIFT) - 1; + int ret; + ++ if (WARN_ON_ONCE(end > PHYSMEM_END)) ++ return -ERANGE; ++ + ret = __add_pages(nid, start_pfn, nr_pages, params); + WARN_ON_ONCE(ret); + +diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c +index 37db264866b64..230f1dee4f095 100644 +--- a/arch/x86/mm/kaslr.c ++++ b/arch/x86/mm/kaslr.c +@@ -47,13 +47,24 @@ static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE; + */ + static __initdata struct kaslr_memory_region { + unsigned long *base; ++ unsigned long *end; + unsigned long size_tb; + } kaslr_regions[] = { +- { &page_offset_base, 0 }, +- { &vmalloc_base, 0 }, +- { &vmemmap_base, 0 }, ++ { ++ .base = &page_offset_base, ++ .end = &physmem_end, ++ }, ++ { ++ .base = &vmalloc_base, ++ }, ++ { ++ .base = &vmemmap_base, ++ }, + }; + ++/* The end of the possible address space for physical memory */ ++unsigned long physmem_end __ro_after_init; ++ + /* Get size in bytes used by the memory region */ + static inline unsigned long get_padding(struct kaslr_memory_region *region) + { +@@ -82,6 +93,8 @@ void __init kernel_randomize_memory(void) + BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE); + BUILD_BUG_ON(vaddr_end > __START_KERNEL_map); + ++ /* Preset the end of the possible address space for physical memory */ ++ physmem_end = ((1ULL << MAX_PHYSMEM_BITS) - 1); + if (!kaslr_memory_enabled()) + return; + +@@ -128,11 +141,18 @@ void __init kernel_randomize_memory(void) + vaddr += entropy; + *kaslr_regions[i].base = vaddr; + ++ /* Calculate the end of the region */ ++ vaddr += get_padding(&kaslr_regions[i]); + /* +- * Jump the region and add a minimum padding based on +- * randomization alignment. ++ * KASLR trims the maximum possible size of the ++ * direct-map. Update the physmem_end boundary. ++ * No rounding required as the region starts ++ * PUD aligned and size is in units of TB. + */ +- vaddr += get_padding(&kaslr_regions[i]); ++ if (kaslr_regions[i].end) ++ *kaslr_regions[i].end = __pa_nodebug(vaddr - 1); ++ ++ /* Add a minimum padding based on randomization alignment. */ + vaddr = round_up(vaddr + 1, PUD_SIZE); + remain_entropy -= entropy; + } +diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c +index 41d8c8f475a7c..83a6bdf0b498e 100644 +--- a/arch/x86/mm/pti.c ++++ b/arch/x86/mm/pti.c +@@ -241,7 +241,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address) + * + * Returns a pointer to a PTE on success, or NULL on failure. + */ +-static pte_t *pti_user_pagetable_walk_pte(unsigned long address) ++static pte_t *pti_user_pagetable_walk_pte(unsigned long address, bool late_text) + { + gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); + pmd_t *pmd; +@@ -251,10 +251,15 @@ static pte_t *pti_user_pagetable_walk_pte(unsigned long address) + if (!pmd) + return NULL; + +- /* We can't do anything sensible if we hit a large mapping. 
*/ ++ /* Large PMD mapping found */ + if (pmd_large(*pmd)) { +- WARN_ON(1); +- return NULL; ++ /* Clear the PMD if we hit a large mapping from the first round */ ++ if (late_text) { ++ set_pmd(pmd, __pmd(0)); ++ } else { ++ WARN_ON_ONCE(1); ++ return NULL; ++ } + } + + if (pmd_none(*pmd)) { +@@ -283,7 +288,7 @@ static void __init pti_setup_vsyscall(void) + if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte)) + return; + +- target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR); ++ target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR, false); + if (WARN_ON(!target_pte)) + return; + +@@ -301,7 +306,7 @@ enum pti_clone_level { + + static void + pti_clone_pgtable(unsigned long start, unsigned long end, +- enum pti_clone_level level) ++ enum pti_clone_level level, bool late_text) + { + unsigned long addr; + +@@ -390,7 +395,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end, + return; + + /* Allocate PTE in the user page-table */ +- target_pte = pti_user_pagetable_walk_pte(addr); ++ target_pte = pti_user_pagetable_walk_pte(addr, late_text); + if (WARN_ON(!target_pte)) + return; + +@@ -452,7 +457,7 @@ static void __init pti_clone_user_shared(void) + phys_addr_t pa = per_cpu_ptr_to_phys((void *)va); + pte_t *target_pte; + +- target_pte = pti_user_pagetable_walk_pte(va); ++ target_pte = pti_user_pagetable_walk_pte(va, false); + if (WARN_ON(!target_pte)) + return; + +@@ -475,7 +480,7 @@ static void __init pti_clone_user_shared(void) + start = CPU_ENTRY_AREA_BASE; + end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES); + +- pti_clone_pgtable(start, end, PTI_CLONE_PMD); ++ pti_clone_pgtable(start, end, PTI_CLONE_PMD, false); + } + #endif /* CONFIG_X86_64 */ + +@@ -492,11 +497,11 @@ static void __init pti_setup_espfix64(void) + /* + * Clone the populated PMDs of the entry text and force it RO. + */ +-static void pti_clone_entry_text(void) ++static void pti_clone_entry_text(bool late) + { + pti_clone_pgtable((unsigned long) __entry_text_start, + (unsigned long) __entry_text_end, +- PTI_LEVEL_KERNEL_IMAGE); ++ PTI_LEVEL_KERNEL_IMAGE, late); + } + + /* +@@ -571,7 +576,7 @@ static void pti_clone_kernel_text(void) + * pti_set_kernel_image_nonglobal() did to clear the + * global bit. + */ +- pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE); ++ pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE, false); + + /* + * pti_clone_pgtable() will set the global bit in any PMDs +@@ -638,8 +643,15 @@ void __init pti_init(void) + + /* Undo all global bits from the init pagetables in head_64.S: */ + pti_set_kernel_image_nonglobal(); ++ + /* Replace some of the global bits just for shared entry text: */ +- pti_clone_entry_text(); ++ /* ++ * This is very early in boot. Device and Late initcalls can do ++ * modprobe before free_initmem() and mark_readonly(). This ++ * pti_clone_entry_text() allows those user-mode-helpers to function, ++ * but notably the text is still RW. ++ */ ++ pti_clone_entry_text(false); + pti_setup_espfix64(); + pti_setup_vsyscall(); + } +@@ -656,10 +668,11 @@ void pti_finalize(void) + if (!boot_cpu_has(X86_FEATURE_PTI)) + return; + /* +- * We need to clone everything (again) that maps parts of the +- * kernel image. ++ * This is after free_initmem() (all initcalls are done) and we've done ++ * mark_readonly(). Text is now NX which might've split some PMDs ++ * relative to the early clone. 
+ */ +- pti_clone_entry_text(); ++ pti_clone_entry_text(true); + pti_clone_kernel_text(); + + debug_checkwx_user(); +diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2_security.c b/drivers/accel/habanalabs/gaudi2/gaudi2_security.c +index 908710524dc9e..493e556cd31b7 100644 +--- a/drivers/accel/habanalabs/gaudi2/gaudi2_security.c ++++ b/drivers/accel/habanalabs/gaudi2/gaudi2_security.c +@@ -479,6 +479,7 @@ static const u32 gaudi2_pb_dcr0_edma0_unsecured_regs[] = { + mmDCORE0_EDMA0_CORE_CTX_TE_NUMROWS, + mmDCORE0_EDMA0_CORE_CTX_IDX, + mmDCORE0_EDMA0_CORE_CTX_IDX_INC, ++ mmDCORE0_EDMA0_CORE_WR_COMP_MAX_OUTSTAND, + mmDCORE0_EDMA0_CORE_RD_LBW_RATE_LIM_CFG, + mmDCORE0_EDMA0_QM_CQ_CFG0_0, + mmDCORE0_EDMA0_QM_CQ_CFG0_1, +diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c +index 0f5218e361df5..7053f1b9fc1dd 100644 +--- a/drivers/acpi/acpi_processor.c ++++ b/drivers/acpi/acpi_processor.c +@@ -415,7 +415,7 @@ static int acpi_processor_add(struct acpi_device *device, + + result = acpi_processor_get_info(device); + if (result) /* Processor is not physically present or unavailable */ +- return 0; ++ goto err_clear_driver_data; + + BUG_ON(pr->id >= nr_cpu_ids); + +@@ -430,7 +430,7 @@ static int acpi_processor_add(struct acpi_device *device, + "BIOS reported wrong ACPI id %d for the processor\n", + pr->id); + /* Give up, but do not abort the namespace scan. */ +- goto err; ++ goto err_clear_driver_data; + } + /* + * processor_device_array is not cleared on errors to allow buggy BIOS +@@ -442,12 +442,12 @@ static int acpi_processor_add(struct acpi_device *device, + dev = get_cpu_device(pr->id); + if (!dev) { + result = -ENODEV; +- goto err; ++ goto err_clear_per_cpu; + } + + result = acpi_bind_one(dev, device); + if (result) +- goto err; ++ goto err_clear_per_cpu; + + pr->dev = dev; + +@@ -458,10 +458,11 @@ static int acpi_processor_add(struct acpi_device *device, + dev_err(dev, "Processor driver could not be attached\n"); + acpi_unbind_one(dev); + +- err: +- free_cpumask_var(pr->throttling.shared_cpu_map); +- device->driver_data = NULL; ++ err_clear_per_cpu: + per_cpu(processors, pr->id) = NULL; ++ err_clear_driver_data: ++ device->driver_data = NULL; ++ free_cpumask_var(pr->throttling.shared_cpu_map); + err_free_pr: + kfree(pr); + return result; +diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c +index d3b9da75a8155..d6934ba7a3154 100644 +--- a/drivers/acpi/cppc_acpi.c ++++ b/drivers/acpi/cppc_acpi.c +@@ -1196,6 +1196,19 @@ int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf) + return cppc_get_perf(cpunum, NOMINAL_PERF, nominal_perf); + } + ++/** ++ * cppc_get_highest_perf - Get the highest performance register value. ++ * @cpunum: CPU from which to get highest performance. ++ * @highest_perf: Return address. ++ * ++ * Return: 0 for success, -EIO otherwise. ++ */ ++int cppc_get_highest_perf(int cpunum, u64 *highest_perf) ++{ ++ return cppc_get_perf(cpunum, HIGHEST_PERF, highest_perf); ++} ++EXPORT_SYMBOL_GPL(cppc_get_highest_perf); ++ + /** + * cppc_get_epp_perf - Get the epp register value. + * @cpunum: CPU from which to get epp preference value. 
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c +index ccaceedbc2c01..94f10c6eb336a 100644 +--- a/drivers/android/binder.c ++++ b/drivers/android/binder.c +@@ -3342,6 +3342,7 @@ static void binder_transaction(struct binder_proc *proc, + */ + copy_size = object_offset - user_offset; + if (copy_size && (user_offset > object_offset || ++ object_offset > tr->data_size || + binder_alloc_copy_user_to_buffer( + &target_proc->alloc, + t->buffer, user_offset, +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c +index 373d23af1d9ac..4ed90d46a017a 100644 +--- a/drivers/ata/libata-core.c ++++ b/drivers/ata/libata-core.c +@@ -5593,8 +5593,10 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports) + } + + dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL); +- if (!dr) ++ if (!dr) { ++ kfree(host); + goto err_out; ++ } + + devres_add(dev, dr); + dev_set_drvdata(dev, host); +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c +index 277bf0e8ed091..c91f8746289f4 100644 +--- a/drivers/ata/libata-scsi.c ++++ b/drivers/ata/libata-scsi.c +@@ -242,10 +242,17 @@ void ata_scsi_set_sense_information(struct ata_device *dev, + */ + static void ata_scsi_set_passthru_sense_fields(struct ata_queued_cmd *qc) + { ++ struct ata_device *dev = qc->dev; + struct scsi_cmnd *cmd = qc->scsicmd; + struct ata_taskfile *tf = &qc->result_tf; + unsigned char *sb = cmd->sense_buffer; + ++ if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) { ++ ata_dev_dbg(dev, ++ "missing result TF: can't set ATA PT sense fields\n"); ++ return; ++ } ++ + if ((sb[0] & 0x7f) >= 0x72) { + unsigned char *desc; + u8 len; +@@ -924,12 +931,16 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, + */ + static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) + { ++ struct ata_device *dev = qc->dev; + struct scsi_cmnd *cmd = qc->scsicmd; + struct ata_taskfile *tf = &qc->result_tf; +- unsigned char *sb = cmd->sense_buffer; + u8 sense_key, asc, ascq; + +- memset(sb, 0, SCSI_SENSE_BUFFERSIZE); ++ if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) { ++ ata_dev_dbg(dev, ++ "missing result TF: can't generate ATA PT sense data\n"); ++ return; ++ } + + /* + * Use ata_to_sense_error() to map status register bits +@@ -976,14 +987,19 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc) + u64 block; + u8 sense_key, asc, ascq; + +- memset(sb, 0, SCSI_SENSE_BUFFERSIZE); +- + if (ata_dev_disabled(dev)) { + /* Device disabled after error recovery */ + /* LOGICAL UNIT NOT READY, HARD RESET REQUIRED */ + ata_scsi_set_sense(dev, cmd, NOT_READY, 0x04, 0x21); + return; + } ++ ++ if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) { ++ ata_dev_dbg(dev, ++ "missing result TF: can't generate sense data\n"); ++ return; ++ } ++ + /* Use ata_to_sense_error() to map status register bits + * onto sense key, asc & ascq. + */ +diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c +index 17f6ccee53c7c..ffbb2e8591cef 100644 +--- a/drivers/ata/pata_macio.c ++++ b/drivers/ata/pata_macio.c +@@ -541,7 +541,8 @@ static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc) + + while (sg_len) { + /* table overflow should never happen */ +- BUG_ON (pi++ >= MAX_DCMDS); ++ if (WARN_ON_ONCE(pi >= MAX_DCMDS)) ++ return AC_ERR_SYSTEM; + + len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG; + table->command = cpu_to_le16(write ? 
OUTPUT_MORE: INPUT_MORE); +@@ -553,11 +554,13 @@ static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc) + addr += len; + sg_len -= len; + ++table; ++ ++pi; + } + } + + /* Should never happen according to Tejun */ +- BUG_ON(!pi); ++ if (WARN_ON_ONCE(!pi)) ++ return AC_ERR_SYSTEM; + + /* Convert the last command to an input/output */ + table--; +diff --git a/drivers/base/devres.c b/drivers/base/devres.c +index 8d709dbd4e0c1..e9b0d94aeabd9 100644 +--- a/drivers/base/devres.c ++++ b/drivers/base/devres.c +@@ -567,6 +567,7 @@ void * devres_open_group(struct device *dev, void *id, gfp_t gfp) + grp->id = grp; + if (id) + grp->id = id; ++ grp->color = 0; + + spin_lock_irqsave(&dev->devres_lock, flags); + add_dr(dev, &grp->node[0]); +diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c +index 55999a50ccc0b..0b6c2277128b4 100644 +--- a/drivers/base/regmap/regcache-maple.c ++++ b/drivers/base/regmap/regcache-maple.c +@@ -110,7 +110,8 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min, + struct maple_tree *mt = map->cache; + MA_STATE(mas, mt, min, max); + unsigned long *entry, *lower, *upper; +- unsigned long lower_index, lower_last; ++ /* initialized to work around false-positive -Wuninitialized warning */ ++ unsigned long lower_index = 0, lower_last = 0; + unsigned long upper_index, upper_last; + int ret = 0; + +diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c +index f4e0573c47114..bf7f68e90953b 100644 +--- a/drivers/block/ublk_drv.c ++++ b/drivers/block/ublk_drv.c +@@ -2603,6 +2603,8 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub, + mutex_lock(&ub->mutex); + if (!ublk_can_use_recovery(ub)) + goto out_unlock; ++ if (!ub->nr_queues_ready) ++ goto out_unlock; + /* + * START_RECOVERY is only allowd after: + * +diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c +index 814dd966b1a45..5ee9a8b8dcfdb 100644 +--- a/drivers/bluetooth/btnxpuart.c ++++ b/drivers/bluetooth/btnxpuart.c +@@ -1326,8 +1326,10 @@ static int btnxpuart_close(struct hci_dev *hdev) + + serdev_device_close(nxpdev->serdev); + skb_queue_purge(&nxpdev->txq); +- kfree_skb(nxpdev->rx_skb); +- nxpdev->rx_skb = NULL; ++ if (!IS_ERR_OR_NULL(nxpdev->rx_skb)) { ++ kfree_skb(nxpdev->rx_skb); ++ nxpdev->rx_skb = NULL; ++ } + clear_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state); + return 0; + } +@@ -1342,8 +1344,10 @@ static int btnxpuart_flush(struct hci_dev *hdev) + + cancel_work_sync(&nxpdev->tx_work); + +- kfree_skb(nxpdev->rx_skb); +- nxpdev->rx_skb = NULL; ++ if (!IS_ERR_OR_NULL(nxpdev->rx_skb)) { ++ kfree_skb(nxpdev->rx_skb); ++ nxpdev->rx_skb = NULL; ++ } + + return 0; + } +diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c +index 9082456d80fbf..7a552387129ef 100644 +--- a/drivers/bluetooth/hci_qca.c ++++ b/drivers/bluetooth/hci_qca.c +@@ -1090,6 +1090,7 @@ static void qca_controller_memdump(struct work_struct *work) + qca->memdump_state = QCA_MEMDUMP_COLLECTED; + cancel_delayed_work(&qca->ctrl_memdump_timeout); + clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); ++ clear_bit(QCA_IBS_DISABLED, &qca->flags); + mutex_unlock(&qca->hci_memdump_lock); + return; + } +diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c +index 85aa089650eaa..1701cce74df79 100644 +--- a/drivers/clk/qcom/clk-alpha-pll.c ++++ b/drivers/clk/qcom/clk-alpha-pll.c +@@ -40,7 +40,7 @@ + + #define PLL_USER_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_USER_CTL]) + # define PLL_POST_DIV_SHIFT 8 +-# define 
PLL_POST_DIV_MASK(p) GENMASK((p)->width, 0) ++# define PLL_POST_DIV_MASK(p) GENMASK((p)->width - 1, 0) + # define PLL_ALPHA_EN BIT(24) + # define PLL_ALPHA_MODE BIT(25) + # define PLL_VCO_SHIFT 20 +@@ -1478,8 +1478,8 @@ clk_trion_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate, + } + + return regmap_update_bits(regmap, PLL_USER_CTL(pll), +- PLL_POST_DIV_MASK(pll) << PLL_POST_DIV_SHIFT, +- val << PLL_POST_DIV_SHIFT); ++ PLL_POST_DIV_MASK(pll) << pll->post_div_shift, ++ val << pll->post_div_shift); + } + + const struct clk_ops clk_alpha_pll_postdiv_trion_ops = { +diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h +index e6d84c8c7989c..84c497f361bc6 100644 +--- a/drivers/clk/qcom/clk-rcg.h ++++ b/drivers/clk/qcom/clk-rcg.h +@@ -176,6 +176,7 @@ extern const struct clk_ops clk_byte2_ops; + extern const struct clk_ops clk_pixel_ops; + extern const struct clk_ops clk_gfx3d_ops; + extern const struct clk_ops clk_rcg2_shared_ops; ++extern const struct clk_ops clk_rcg2_shared_no_init_park_ops; + extern const struct clk_ops clk_dp_ops; + + struct clk_rcg_dfs_data { +diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c +index b9f2a29be927c..461f54fe5e4f1 100644 +--- a/drivers/clk/qcom/clk-rcg2.c ++++ b/drivers/clk/qcom/clk-rcg2.c +@@ -1182,6 +1182,36 @@ const struct clk_ops clk_rcg2_shared_ops = { + }; + EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops); + ++static int clk_rcg2_shared_no_init_park(struct clk_hw *hw) ++{ ++ struct clk_rcg2 *rcg = to_clk_rcg2(hw); ++ ++ /* ++ * Read the config register so that the parent is properly mapped at ++ * registration time. ++ */ ++ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &rcg->parked_cfg); ++ ++ return 0; ++} ++ ++/* ++ * Like clk_rcg2_shared_ops but skip the init so that the clk frequency is left ++ * unchanged at registration time. 
++ */ ++const struct clk_ops clk_rcg2_shared_no_init_park_ops = { ++ .init = clk_rcg2_shared_no_init_park, ++ .enable = clk_rcg2_shared_enable, ++ .disable = clk_rcg2_shared_disable, ++ .get_parent = clk_rcg2_shared_get_parent, ++ .set_parent = clk_rcg2_shared_set_parent, ++ .recalc_rate = clk_rcg2_shared_recalc_rate, ++ .determine_rate = clk_rcg2_determine_rate, ++ .set_rate = clk_rcg2_shared_set_rate, ++ .set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent, ++}; ++EXPORT_SYMBOL_GPL(clk_rcg2_shared_no_init_park_ops); ++ + /* Common APIs to be used for DFS based RCGR */ + static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l, + struct freq_tbl *f) +diff --git a/drivers/clk/qcom/gcc-ipq9574.c b/drivers/clk/qcom/gcc-ipq9574.c +index f8b9a1e93bef2..cdbbf2cc9c5d1 100644 +--- a/drivers/clk/qcom/gcc-ipq9574.c ++++ b/drivers/clk/qcom/gcc-ipq9574.c +@@ -65,7 +65,7 @@ static const struct clk_parent_data gcc_sleep_clk_data[] = { + + static struct clk_alpha_pll gpll0_main = { + .offset = 0x20000, +- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], ++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO], + .clkr = { + .enable_reg = 0x0b000, + .enable_mask = BIT(0), +@@ -93,7 +93,7 @@ static struct clk_fixed_factor gpll0_out_main_div2 = { + + static struct clk_alpha_pll_postdiv gpll0 = { + .offset = 0x20000, +- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], ++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO], + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gpll0", +@@ -107,7 +107,7 @@ static struct clk_alpha_pll_postdiv gpll0 = { + + static struct clk_alpha_pll gpll4_main = { + .offset = 0x22000, +- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], ++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO], + .clkr = { + .enable_reg = 0x0b000, + .enable_mask = BIT(2), +@@ -122,7 +122,7 @@ static struct clk_alpha_pll gpll4_main = { + + static struct clk_alpha_pll_postdiv gpll4 = { + .offset = 0x22000, +- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], ++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO], + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gpll4", +@@ -136,7 +136,7 @@ static struct clk_alpha_pll_postdiv gpll4 = { + + static struct clk_alpha_pll gpll2_main = { + .offset = 0x21000, +- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], ++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO], + .clkr = { + .enable_reg = 0x0b000, + .enable_mask = BIT(1), +@@ -151,7 +151,7 @@ static struct clk_alpha_pll gpll2_main = { + + static struct clk_alpha_pll_postdiv gpll2 = { + .offset = 0x21000, +- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], ++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO], + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gpll2", +diff --git a/drivers/clk/qcom/gcc-sm8550.c b/drivers/clk/qcom/gcc-sm8550.c +index b883dffe5f7aa..eb3765c57b650 100644 +--- a/drivers/clk/qcom/gcc-sm8550.c ++++ b/drivers/clk/qcom/gcc-sm8550.c +@@ -536,7 +536,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s0_clk_src = { + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_shared_ops, ++ .ops = &clk_rcg2_ops, + }, + }; + +@@ -551,7 +551,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s1_clk_src = { + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_shared_ops, ++ 
.ops = &clk_rcg2_ops, + }, + }; + +@@ -566,7 +566,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s2_clk_src = { + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_shared_ops, ++ .ops = &clk_rcg2_ops, + }, + }; + +@@ -581,7 +581,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s3_clk_src = { + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_shared_ops, ++ .ops = &clk_rcg2_ops, + }, + }; + +@@ -596,7 +596,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s4_clk_src = { + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_shared_ops, ++ .ops = &clk_rcg2_ops, + }, + }; + +@@ -611,7 +611,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s5_clk_src = { + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_shared_ops, ++ .ops = &clk_rcg2_ops, + }, + }; + +@@ -626,7 +626,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s6_clk_src = { + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_shared_ops, ++ .ops = &clk_rcg2_ops, + }, + }; + +@@ -641,7 +641,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s7_clk_src = { + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_shared_ops, ++ .ops = &clk_rcg2_ops, + }, + }; + +@@ -656,7 +656,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s8_clk_src = { + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_shared_ops, ++ .ops = &clk_rcg2_ops, + }, + }; + +@@ -671,7 +671,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s9_clk_src = { + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_shared_ops, ++ .ops = &clk_rcg2_ops, + }, + }; + +@@ -700,7 +700,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = { + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_shared_ops, ++ .ops = &clk_rcg2_ops, + }; + + static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = { +@@ -717,7 +717,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = { + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_shared_ops, ++ .ops = &clk_rcg2_ops, + }; + + static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = { +@@ -750,7 +750,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = { + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_shared_ops, ++ .ops = &clk_rcg2_ops, + }; + + static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = { +@@ -767,7 +767,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = { + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_shared_ops, ++ .ops = &clk_rcg2_ops, + }; + + static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = { +@@ -784,7 +784,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = { + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + 
.flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_shared_ops, ++ .ops = &clk_rcg2_ops, + }; + + static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = { +@@ -801,7 +801,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = { + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_shared_ops, ++ .ops = &clk_rcg2_ops, + }; + + static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = { +@@ -818,7 +818,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = { + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_shared_ops, ++ .ops = &clk_rcg2_ops, + }; + + static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = { +@@ -835,7 +835,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = { + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_shared_ops, ++ .ops = &clk_rcg2_ops, + }; + + static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = { +@@ -852,7 +852,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = { + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_shared_ops, ++ .ops = &clk_rcg2_ops, + }; + + static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = { +@@ -869,7 +869,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = { + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_shared_ops, ++ .ops = &clk_rcg2_ops, + }; + + static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = { +@@ -886,7 +886,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = { + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_shared_ops, ++ .ops = &clk_rcg2_ops, + }; + + static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = { +@@ -903,7 +903,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = { + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_shared_ops, ++ .ops = &clk_rcg2_ops, + }; + + static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = { +@@ -920,7 +920,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = { + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_shared_ops, ++ .ops = &clk_rcg2_ops, + }; + + static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = { +@@ -937,7 +937,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = { + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_shared_ops, ++ .ops = &clk_rcg2_ops, + }; + + static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = { +@@ -975,7 +975,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s6_clk_src_init = { + .parent_data = gcc_parent_data_8, + .num_parents = ARRAY_SIZE(gcc_parent_data_8), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_shared_ops, ++ .ops = &clk_rcg2_ops, + }; + + static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = { +@@ -992,7 +992,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s7_clk_src_init = { + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = 
CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_shared_ops,
++ .ops = &clk_rcg2_ops,
+ };
+
+ static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = {
+@@ -1159,7 +1159,7 @@ static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_shared_ops,
++ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+ };
+
+diff --git a/drivers/clk/starfive/clk-starfive-jh7110-sys.c b/drivers/clk/starfive/clk-starfive-jh7110-sys.c
+index 3884eff9fe931..58ef7c6d69cce 100644
+--- a/drivers/clk/starfive/clk-starfive-jh7110-sys.c
++++ b/drivers/clk/starfive/clk-starfive-jh7110-sys.c
+@@ -385,6 +385,32 @@ int jh7110_reset_controller_register(struct jh71x0_clk_priv *priv,
+ }
+ EXPORT_SYMBOL_GPL(jh7110_reset_controller_register);
+
++/*
++ * This clock notifier is called when the rate of the PLL0 clock is about to
++ * change. The cpu_root clock should save its current parent clock and switch
++ * its parent clock to osc before the PLL0 rate changes, then switch its
++ * parent clock back once the PLL0 rate change has completed.
++ */
++static int jh7110_pll0_clk_notifier_cb(struct notifier_block *nb,
++ unsigned long action, void *data)
++{
++ struct jh71x0_clk_priv *priv = container_of(nb, struct jh71x0_clk_priv, pll_clk_nb);
++ struct clk *cpu_root = priv->reg[JH7110_SYSCLK_CPU_ROOT].hw.clk;
++ int ret = 0;
++
++ if (action == PRE_RATE_CHANGE) {
++ struct clk *osc = clk_get(priv->dev, "osc");
++
++ priv->original_clk = clk_get_parent(cpu_root);
++ ret = clk_set_parent(cpu_root, osc);
++ clk_put(osc);
++ } else if (action == POST_RATE_CHANGE) {
++ ret = clk_set_parent(cpu_root, priv->original_clk);
++ }
++
++ return notifier_from_errno(ret);
++}
++
+ static int __init jh7110_syscrg_probe(struct platform_device *pdev)
+ {
+ struct jh71x0_clk_priv *priv;
+@@ -413,7 +439,10 @@ static int __init jh7110_syscrg_probe(struct platform_device *pdev)
+ if (IS_ERR(priv->pll[0]))
+ return PTR_ERR(priv->pll[0]);
+ } else {
+- clk_put(pllclk);
++ priv->pll_clk_nb.notifier_call = jh7110_pll0_clk_notifier_cb;
++ ret = clk_notifier_register(pllclk, &priv->pll_clk_nb);
++ if (ret)
++ return ret;
+ priv->pll[0] = NULL;
+ }
+
+diff --git a/drivers/clk/starfive/clk-starfive-jh71x0.h b/drivers/clk/starfive/clk-starfive-jh71x0.h
+index 34bb11c72eb73..ebc55b9ef8370 100644
+--- a/drivers/clk/starfive/clk-starfive-jh71x0.h
++++ b/drivers/clk/starfive/clk-starfive-jh71x0.h
+@@ -114,6 +114,8 @@ struct jh71x0_clk_priv {
+ spinlock_t rmw_lock;
+ struct device *dev;
+ void __iomem *base;
++ struct clk *original_clk;
++ struct notifier_block pll_clk_nb;
+ struct clk_hw *pll[3];
+ struct jh71x0_clk reg[];
+ };
+diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c
+index bd64a8a8427f3..92c025b70eb62 100644
+--- a/drivers/clocksource/timer-imx-tpm.c
++++ b/drivers/clocksource/timer-imx-tpm.c
+@@ -83,20 +83,28 @@ static u64 notrace tpm_read_sched_clock(void)
+ static int tpm_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+ {
+- unsigned long next, now;
++ unsigned long next, prev, now;
+
+- next = tpm_read_counter();
+- next += delta;
++ prev = tpm_read_counter();
++ next = prev + delta;
+ writel(next, timer_base + TPM_C0V);
+ now = tpm_read_counter();
+
++ /*
++ * Wait for CNT to increase by at least 1 cycle to make sure
++ * the C0V value has been updated into the HW.
++ */ ++ if ((next & 0xffffffff) != readl(timer_base + TPM_C0V)) ++ while (now == tpm_read_counter()) ++ ; ++ + /* + * NOTE: We observed in a very small probability, the bus fabric + * contention between GPU and A7 may results a few cycles delay + * of writing CNT registers which may cause the min_delta event got + * missed, so we need add a ETIME check here in case it happened. + */ +- return (int)(next - now) <= 0 ? -ETIME : 0; ++ return (now - prev) >= delta ? -ETIME : 0; + } + + static int tpm_set_state_oneshot(struct clock_event_device *evt) +diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c +index c3f54d9912be7..420202bf76e42 100644 +--- a/drivers/clocksource/timer-of.c ++++ b/drivers/clocksource/timer-of.c +@@ -25,10 +25,7 @@ static __init void timer_of_irq_exit(struct of_timer_irq *of_irq) + + struct clock_event_device *clkevt = &to->clkevt; + +- if (of_irq->percpu) +- free_percpu_irq(of_irq->irq, clkevt); +- else +- free_irq(of_irq->irq, clkevt); ++ free_irq(of_irq->irq, clkevt); + } + + /** +@@ -42,9 +39,6 @@ static __init void timer_of_irq_exit(struct of_timer_irq *of_irq) + * - Get interrupt number by name + * - Get interrupt number by index + * +- * When the interrupt is per CPU, 'request_percpu_irq()' is called, +- * otherwise 'request_irq()' is used. +- * + * Returns 0 on success, < 0 otherwise + */ + static __init int timer_of_irq_init(struct device_node *np, +@@ -69,12 +63,9 @@ static __init int timer_of_irq_init(struct device_node *np, + return -EINVAL; + } + +- ret = of_irq->percpu ? +- request_percpu_irq(of_irq->irq, of_irq->handler, +- np->full_name, clkevt) : +- request_irq(of_irq->irq, of_irq->handler, +- of_irq->flags ? of_irq->flags : IRQF_TIMER, +- np->full_name, clkevt); ++ ret = request_irq(of_irq->irq, of_irq->handler, ++ of_irq->flags ? of_irq->flags : IRQF_TIMER, ++ np->full_name, clkevt); + if (ret) { + pr_err("Failed to request irq %d for %pOF\n", of_irq->irq, np); + return ret; +diff --git a/drivers/clocksource/timer-of.h b/drivers/clocksource/timer-of.h +index a5478f3e8589d..01a2c6b7db065 100644 +--- a/drivers/clocksource/timer-of.h ++++ b/drivers/clocksource/timer-of.h +@@ -11,7 +11,6 @@ + struct of_timer_irq { + int irq; + int index; +- int percpu; + const char *name; + unsigned long flags; + irq_handler_t handler; +diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c +index 23c74e9f04c48..f461f99eb040c 100644 +--- a/drivers/cpufreq/amd-pstate.c ++++ b/drivers/cpufreq/amd-pstate.c +@@ -37,6 +37,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -49,6 +50,8 @@ + + #define AMD_PSTATE_TRANSITION_LATENCY 20000 + #define AMD_PSTATE_TRANSITION_DELAY 1000 ++#define CPPC_HIGHEST_PERF_PERFORMANCE 196 ++#define CPPC_HIGHEST_PERF_DEFAULT 166 + + /* + * TODO: We need more time to fine tune processors with shared memory solution +@@ -64,6 +67,7 @@ static struct cpufreq_driver amd_pstate_driver; + static struct cpufreq_driver amd_pstate_epp_driver; + static int cppc_state = AMD_PSTATE_UNDEFINED; + static bool cppc_enabled; ++static bool amd_pstate_prefcore = true; + + /* + * AMD Energy Preference Performance (EPP) +@@ -310,6 +314,21 @@ static inline int amd_pstate_enable(bool enable) + return static_call(amd_pstate_enable)(enable); + } + ++static u32 amd_pstate_highest_perf_set(struct amd_cpudata *cpudata) ++{ ++ struct cpuinfo_x86 *c = &cpu_data(0); ++ ++ /* ++ * For AMD CPUs with Family ID 19H and Model ID range 0x70 to 0x7f, ++ * the highest performance level is set to 196. 
++ * https://bugzilla.kernel.org/show_bug.cgi?id=218759
++ */
++ if (c->x86 == 0x19 && (c->x86_model >= 0x70 && c->x86_model <= 0x7f))
++ return CPPC_HIGHEST_PERF_PERFORMANCE;
++
++ return CPPC_HIGHEST_PERF_DEFAULT;
++}
++
+ static int pstate_init_perf(struct amd_cpudata *cpudata)
+ {
+ u64 cap1;
+@@ -320,13 +339,14 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
+ if (ret)
+ return ret;
+
+- /*
+- * TODO: Introduce AMD specific power feature.
+- *
+- * CPPC entry doesn't indicate the highest performance in some ASICs.
++ /* For platforms that do not support the preferred core feature,
++ * highest_perf may be configured as 166 or 255. To avoid the max
++ * frequency being calculated wrongly, we take the AMD_CPPC_HIGHEST_PERF(cap1)
++ * value as the default max perf.
+ */
+- highest_perf = amd_get_highest_perf();
+- if (highest_perf > AMD_CPPC_HIGHEST_PERF(cap1))
++ if (cpudata->hw_prefcore)
++ highest_perf = amd_pstate_highest_perf_set(cpudata);
++ else
+ highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
+
+ WRITE_ONCE(cpudata->highest_perf, highest_perf);
+@@ -347,8 +367,9 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
+ if (ret)
+ return ret;
+
+- highest_perf = amd_get_highest_perf();
+- if (highest_perf > cppc_perf.highest_perf)
++ if (cpudata->hw_prefcore)
++ highest_perf = amd_pstate_highest_perf_set(cpudata);
++ else
+ highest_perf = cppc_perf.highest_perf;
+
+ WRITE_ONCE(cpudata->highest_perf, highest_perf);
+@@ -709,6 +730,80 @@ static void amd_perf_ctl_reset(unsigned int cpu)
+ wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
+ }
+
++/*
++ * Setting amd-pstate preferred core enable can't be done directly from cpufreq
++ * callbacks due to locking, so queue the work for later.
++ */
++static void amd_pstste_sched_prefcore_workfn(struct work_struct *work)
++{
++ sched_set_itmt_support();
++}
++static DECLARE_WORK(sched_prefcore_work, amd_pstste_sched_prefcore_workfn);
++
++/*
++ * Get the highest performance register value.
++ * @cpu: CPU from which to get highest performance.
++ * @highest_perf: Return address.
++ *
++ * Return: 0 for success, -EIO otherwise.
++ */
++static int amd_pstate_get_highest_perf(int cpu, u32 *highest_perf)
++{
++ int ret;
++
++ if (boot_cpu_has(X86_FEATURE_CPPC)) {
++ u64 cap1;
++
++ ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
++ if (ret)
++ return ret;
++ WRITE_ONCE(*highest_perf, AMD_CPPC_HIGHEST_PERF(cap1));
++ } else {
++ u64 cppc_highest_perf;
++
++ ret = cppc_get_highest_perf(cpu, &cppc_highest_perf);
++ if (ret)
++ return ret;
++ WRITE_ONCE(*highest_perf, cppc_highest_perf);
++ }
++
++ return (ret);
++}
++
++#define CPPC_MAX_PERF U8_MAX
++
++static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
++{
++ int ret, prio;
++ u32 highest_perf;
++
++ ret = amd_pstate_get_highest_perf(cpudata->cpu, &highest_perf);
++ if (ret)
++ return;
++
++ cpudata->hw_prefcore = true;
++ /* check if the CPPC preferred core feature is enabled */
++ if (highest_perf < CPPC_MAX_PERF)
++ prio = (int)highest_perf;
++ else {
++ pr_debug("AMD CPPC preferred core is unsupported!\n");
++ cpudata->hw_prefcore = false;
++ return;
++ }
++
++ if (!amd_pstate_prefcore)
++ return;
++
++ /*
++ * The priorities can be set regardless of whether or not
++ * sched_set_itmt_support(true) has been called and it is valid to
++ * update them at any time after it has been called.
++ */ ++ sched_set_itmt_core_prio(prio, cpudata->cpu); ++ ++ schedule_work(&sched_prefcore_work); ++} ++ + static int amd_pstate_cpu_init(struct cpufreq_policy *policy) + { + int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret; +@@ -730,6 +825,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy) + + cpudata->cpu = policy->cpu; + ++ amd_pstate_init_prefcore(cpudata); ++ + ret = amd_pstate_init_perf(cpudata); + if (ret) + goto free_cpudata1; +@@ -880,6 +977,17 @@ static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy, + return sysfs_emit(buf, "%u\n", perf); + } + ++static ssize_t show_amd_pstate_hw_prefcore(struct cpufreq_policy *policy, ++ char *buf) ++{ ++ bool hw_prefcore; ++ struct amd_cpudata *cpudata = policy->driver_data; ++ ++ hw_prefcore = READ_ONCE(cpudata->hw_prefcore); ++ ++ return sysfs_emit(buf, "%s\n", str_enabled_disabled(hw_prefcore)); ++} ++ + static ssize_t show_energy_performance_available_preferences( + struct cpufreq_policy *policy, char *buf) + { +@@ -1077,18 +1185,27 @@ static ssize_t status_store(struct device *a, struct device_attribute *b, + return ret < 0 ? ret : count; + } + ++static ssize_t prefcore_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ return sysfs_emit(buf, "%s\n", str_enabled_disabled(amd_pstate_prefcore)); ++} ++ + cpufreq_freq_attr_ro(amd_pstate_max_freq); + cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq); + + cpufreq_freq_attr_ro(amd_pstate_highest_perf); ++cpufreq_freq_attr_ro(amd_pstate_hw_prefcore); + cpufreq_freq_attr_rw(energy_performance_preference); + cpufreq_freq_attr_ro(energy_performance_available_preferences); + static DEVICE_ATTR_RW(status); ++static DEVICE_ATTR_RO(prefcore); + + static struct freq_attr *amd_pstate_attr[] = { + &amd_pstate_max_freq, + &amd_pstate_lowest_nonlinear_freq, + &amd_pstate_highest_perf, ++ &amd_pstate_hw_prefcore, + NULL, + }; + +@@ -1096,6 +1213,7 @@ static struct freq_attr *amd_pstate_epp_attr[] = { + &amd_pstate_max_freq, + &amd_pstate_lowest_nonlinear_freq, + &amd_pstate_highest_perf, ++ &amd_pstate_hw_prefcore, + &energy_performance_preference, + &energy_performance_available_preferences, + NULL, +@@ -1103,6 +1221,7 @@ static struct freq_attr *amd_pstate_epp_attr[] = { + + static struct attribute *pstate_global_attributes[] = { + &dev_attr_status.attr, ++ &dev_attr_prefcore.attr, + NULL + }; + +@@ -1154,6 +1273,8 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy) + cpudata->cpu = policy->cpu; + cpudata->epp_policy = 0; + ++ amd_pstate_init_prefcore(cpudata); ++ + ret = amd_pstate_init_perf(cpudata); + if (ret) + goto free_cpudata1; +@@ -1577,7 +1698,17 @@ static int __init amd_pstate_param(char *str) + + return amd_pstate_set_driver(mode_idx); + } ++ ++static int __init amd_prefcore_param(char *str) ++{ ++ if (!strcmp(str, "disable")) ++ amd_pstate_prefcore = false; ++ ++ return 0; ++} ++ + early_param("amd_pstate", amd_pstate_param); ++early_param("amd_prefcore", amd_prefcore_param); + + MODULE_AUTHOR("Huang Rui "); + MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver"); +diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c +index 70ef119639381..43af81fcab868 100644 +--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c ++++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c +@@ -100,7 +100,9 @@ static u32 adf_gen2_disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr) + errmsk3 |= 
ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK); + ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3); + +- errmsk3 &= ADF_GEN2_ERR_MSK_VF2PF(sources | disabled); ++ /* Update only section of errmsk3 related to VF2PF */ ++ errmsk3 &= ~ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK); ++ errmsk3 |= ADF_GEN2_ERR_MSK_VF2PF(sources | disabled); + ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3); + + /* Return the sources of the (new) interrupt(s) */ +diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c +index 09551f9491265..0e40897cc983a 100644 +--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c ++++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c +@@ -191,8 +191,12 @@ static u32 disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr) + ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3); + ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5); + +- errmsk3 &= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled); +- errmsk5 &= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled); ++ /* Update only section of errmsk3 and errmsk5 related to VF2PF */ ++ errmsk3 &= ~ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK); ++ errmsk5 &= ~ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK); ++ ++ errmsk3 |= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled); ++ errmsk5 |= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled); + ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3); + ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5); + +diff --git a/drivers/crypto/starfive/jh7110-cryp.h b/drivers/crypto/starfive/jh7110-cryp.h +index fe011d50473d7..607f70292b215 100644 +--- a/drivers/crypto/starfive/jh7110-cryp.h ++++ b/drivers/crypto/starfive/jh7110-cryp.h +@@ -30,6 +30,7 @@ + #define MAX_KEY_SIZE SHA512_BLOCK_SIZE + #define STARFIVE_AES_IV_LEN AES_BLOCK_SIZE + #define STARFIVE_AES_CTR_LEN AES_BLOCK_SIZE ++#define STARFIVE_RSA_MAX_KEYSZ 256 + + union starfive_aes_csr { + u32 v; +@@ -212,12 +213,11 @@ struct starfive_cryp_request_ctx { + struct scatterlist *out_sg; + struct ahash_request ahash_fbk_req; + size_t total; +- size_t nents; + unsigned int blksize; + unsigned int digsize; + unsigned long in_sg_len; + unsigned char *adata; +- u8 rsa_data[] __aligned(sizeof(u32)); ++ u8 rsa_data[STARFIVE_RSA_MAX_KEYSZ] __aligned(sizeof(u32)); + }; + + struct starfive_cryp_dev *starfive_cryp_find_dev(struct starfive_cryp_ctx *ctx); +diff --git a/drivers/crypto/starfive/jh7110-rsa.c b/drivers/crypto/starfive/jh7110-rsa.c +index f31bbd825f883..1db9a3d02848b 100644 +--- a/drivers/crypto/starfive/jh7110-rsa.c ++++ b/drivers/crypto/starfive/jh7110-rsa.c +@@ -37,7 +37,6 @@ + // A * A * R mod N ==> A + #define CRYPTO_CMD_AARN 0x7 + +-#define STARFIVE_RSA_MAX_KEYSZ 256 + #define STARFIVE_RSA_RESET 0x2 + + static inline int starfive_pka_wait_done(struct starfive_cryp_ctx *ctx) +@@ -91,7 +90,7 @@ static int starfive_rsa_montgomery_form(struct starfive_cryp_ctx *ctx, + { + struct starfive_cryp_dev *cryp = ctx->cryp; + struct starfive_cryp_request_ctx *rctx = ctx->rctx; +- int count = rctx->total / sizeof(u32) - 1; ++ int count = (ALIGN(rctx->total, 4) / 4) - 1; + int loop; + u32 temp; + u8 opsize; +@@ -274,12 +273,17 @@ static int starfive_rsa_enc_core(struct starfive_cryp_ctx *ctx, int enc) + struct starfive_cryp_dev *cryp = ctx->cryp; + struct starfive_cryp_request_ctx *rctx = ctx->rctx; + struct starfive_rsa_key *key = &ctx->rsa_key; +- int ret = 0; ++ int ret = 0, shift = 0; + + writel(STARFIVE_RSA_RESET, cryp->base + STARFIVE_PKA_CACR_OFFSET); + +- 
rctx->total = sg_copy_to_buffer(rctx->in_sg, rctx->nents,
+- rctx->rsa_data, rctx->total);
++ if (!IS_ALIGNED(rctx->total, sizeof(u32))) {
++ shift = sizeof(u32) - (rctx->total & 0x3);
++ memset(rctx->rsa_data, 0, shift);
++ }
++
++ rctx->total = sg_copy_to_buffer(rctx->in_sg, sg_nents(rctx->in_sg),
++ rctx->rsa_data + shift, rctx->total);
+
+ if (enc) {
+ key->bitlen = key->e_bitlen;
+@@ -329,7 +333,6 @@ static int starfive_rsa_enc(struct akcipher_request *req)
+ rctx->in_sg = req->src;
+ rctx->out_sg = req->dst;
+ rctx->total = req->src_len;
+- rctx->nents = sg_nents(rctx->in_sg);
+ ctx->rctx = rctx;
+
+ return starfive_rsa_enc_core(ctx, 1);
+diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
+index bc5a43897d578..5060d9802795e 100644
+--- a/drivers/cxl/core/region.c
++++ b/drivers/cxl/core/region.c
+@@ -1528,10 +1528,13 @@ static int cxl_region_attach_position(struct cxl_region *cxlr,
+ const struct cxl_dport *dport, int pos)
+ {
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
++ struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd;
++ struct cxl_decoder *cxld = &cxlsd->cxld;
++ int iw = cxld->interleave_ways;
+ struct cxl_port *iter;
+ int rc;
+
+- if (cxlrd->calc_hb(cxlrd, pos) != dport) {
++ if (dport != cxlrd->cxlsd.target[pos % iw]) {
+ dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+ dev_name(&cxlrd->cxlsd.cxld.dev));
+diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
+index a1da7581adb03..e62ffffe5fb8d 100644
+--- a/drivers/firmware/cirrus/cs_dsp.c
++++ b/drivers/firmware/cirrus/cs_dsp.c
+@@ -796,6 +796,9 @@ int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl,
+
+ lockdep_assert_held(&ctl->dsp->pwr_lock);
+
++ if (ctl->flags && !(ctl->flags & WMFW_CTL_FLAG_WRITEABLE))
++ return -EPERM;
++
+ if (len + off * sizeof(u32) > ctl->len)
+ return -EINVAL;
+
+diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
+index b35b9604413f0..caeb3bdc78f8d 100644
+--- a/drivers/gpio/gpio-rockchip.c
++++ b/drivers/gpio/gpio-rockchip.c
+@@ -713,6 +713,7 @@ static int rockchip_gpio_probe(struct platform_device *pdev)
+ return -ENODEV;
+
+ pctldev = of_pinctrl_get(pctlnp);
++ of_node_put(pctlnp);
+ if (!pctldev)
+ return -EPROBE_DEFER;
+
+diff --git a/drivers/gpio/gpio-zynqmp-modepin.c b/drivers/gpio/gpio-zynqmp-modepin.c
+index a0d69387c1532..2f3c9ebfa78d1 100644
+--- a/drivers/gpio/gpio-zynqmp-modepin.c
++++ b/drivers/gpio/gpio-zynqmp-modepin.c
+@@ -146,6 +146,7 @@ static const struct of_device_id modepin_platform_id[] = {
+ { .compatible = "xlnx,zynqmp-gpio-modepin", },
+ { }
+ };
++MODULE_DEVICE_TABLE(of, modepin_platform_id);
+
+ static struct platform_driver modepin_platform_driver = {
+ .driver = {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 61668a784315f..e361dc37a0890 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -1096,6 +1096,21 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
+ unsigned int i;
+ int r;
+
++ /*
++ * We can't use gang submit with reserved VMIDs when the VM changes
++ * can't be invalidated by more than one engine at the same time.
++ */ ++ if (p->gang_size > 1 && !p->adev->vm_manager.concurrent_flush) { ++ for (i = 0; i < p->gang_size; ++i) { ++ struct drm_sched_entity *entity = p->entities[i]; ++ struct drm_gpu_scheduler *sched = entity->rq->sched; ++ struct amdgpu_ring *ring = to_amdgpu_ring(sched); ++ ++ if (amdgpu_vmid_uses_reserved(vm, ring->vm_hub)) ++ return -EINVAL; ++ } ++ } ++ + r = amdgpu_vm_clear_freed(adev, vm, NULL); + if (r) + return r; +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +index 578aeba49ea8e..5fbb9caa7415f 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +@@ -909,8 +909,7 @@ static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb) + { + u64 micro_tile_mode; + +- /* Zero swizzle mode means linear */ +- if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0) ++ if (AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) == 1) /* LINEAR_ALIGNED */ + return 0; + + micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE); +@@ -1034,6 +1033,30 @@ static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb) + block_width = 256 / format_info->cpp[i]; + block_height = 1; + block_size_log2 = 8; ++ } else if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX12) { ++ int swizzle = AMD_FMT_MOD_GET(TILE, modifier); ++ ++ switch (swizzle) { ++ case AMD_FMT_MOD_TILE_GFX12_256B_2D: ++ block_size_log2 = 8; ++ break; ++ case AMD_FMT_MOD_TILE_GFX12_4K_2D: ++ block_size_log2 = 12; ++ break; ++ case AMD_FMT_MOD_TILE_GFX12_64K_2D: ++ block_size_log2 = 16; ++ break; ++ case AMD_FMT_MOD_TILE_GFX12_256K_2D: ++ block_size_log2 = 18; ++ break; ++ default: ++ drm_dbg_kms(rfb->base.dev, ++ "Gfx12 swizzle mode with unknown block size: %d\n", swizzle); ++ return -EINVAL; ++ } ++ ++ get_block_dimensions(block_size_log2, format_info->cpp[i], ++ &block_width, &block_height); + } else { + int swizzle = AMD_FMT_MOD_GET(TILE, modifier); + +@@ -1069,7 +1092,8 @@ static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb) + return ret; + } + +- if (AMD_FMT_MOD_GET(DCC, modifier)) { ++ if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) <= AMD_FMT_MOD_TILE_VER_GFX11 && ++ AMD_FMT_MOD_GET(DCC, modifier)) { + if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) { + block_size_log2 = get_dcc_block_size(modifier, false, false); + get_block_dimensions(block_size_log2 + 8, format_info->cpp[0], +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +index ff1ea99292fbf..69dfc699d78b0 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +@@ -409,7 +409,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, + if (r || !idle) + goto error; + +- if (vm->reserved_vmid[vmhub] || (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)))) { ++ if (amdgpu_vmid_uses_reserved(vm, vmhub)) { + r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence); + if (r || !id) + goto error; +@@ -459,6 +459,19 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, + return r; + } + ++/* ++ * amdgpu_vmid_uses_reserved - check if a VM will use a reserved VMID ++ * @vm: the VM to check ++ * @vmhub: the VMHUB which will be used ++ * ++ * Returns: True if the VM will use a reserved VMID. 
++ */ ++bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub) ++{ ++ return vm->reserved_vmid[vmhub] || ++ (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0))); ++} ++ + int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, + unsigned vmhub) + { +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h +index fa8c42c83d5d2..240fa67512602 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h +@@ -78,6 +78,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv, + + bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev, + struct amdgpu_vmid *id); ++bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub); + int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, + unsigned vmhub); + void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +index d9dc675b46aed..22575422ca7ec 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +@@ -137,8 +137,10 @@ int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init) + + if (virt->ops && virt->ops->req_full_gpu) { + r = virt->ops->req_full_gpu(adev, init); +- if (r) ++ if (r) { ++ adev->no_hw_access = true; + return r; ++ } + + adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; + } +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +index c81e98f0d17ff..c813cd7b015e1 100644 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +@@ -4269,11 +4269,11 @@ static int gfx_v11_0_hw_init(void *handle) + /* RLC autoload sequence 1: Program rlc ram */ + if (adev->gfx.imu.funcs->program_rlc_ram) + adev->gfx.imu.funcs->program_rlc_ram(adev); ++ /* rlc autoload firmware */ ++ r = gfx_v11_0_rlc_backdoor_autoload_enable(adev); ++ if (r) ++ return r; + } +- /* rlc autoload firmware */ +- r = gfx_v11_0_rlc_backdoor_autoload_enable(adev); +- if (r) +- return r; + } else { + if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { + if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) { +diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c +index f432dc72df6a9..725b1a585088d 100644 +--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c +@@ -135,6 +135,34 @@ static int ih_v6_0_toggle_ring_interrupts(struct amdgpu_device *adev, + + tmp = RREG32(ih_regs->ih_rb_cntl); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0)); ++ ++ if (enable) { ++ /* Unset the CLEAR_OVERFLOW bit to make sure the next step ++ * is switching the bit from 0 to 1 ++ */ ++ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); ++ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { ++ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) ++ return -ETIMEDOUT; ++ } else { ++ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); ++ } ++ ++ /* Clear RB_OVERFLOW bit */ ++ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); ++ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { ++ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) ++ return -ETIMEDOUT; ++ } else { ++ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); ++ } ++ ++ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows ++ * can be detected. 
++ */ ++ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); ++ } ++ + /* enable_intr field is only valid in ring0 */ + if (ih == &adev->irq.ih) + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0)); +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +index 44c1556838240..f0ebf686b06f2 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +@@ -6937,7 +6937,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, + } + } + +- if (j == dc_state->stream_count) ++ if (j == dc_state->stream_count || pbn_div == 0) + continue; + + slot_num = DIV_ROUND_UP(pbn, pbn_div); +diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c +index 2c366866f5700..33bb96f770b86 100644 +--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c ++++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c +@@ -629,14 +629,14 @@ static bool construct_phy(struct dc_link *link, + link->link_enc = + link->dc->res_pool->funcs->link_enc_create(dc_ctx, &enc_init_data); + +- DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C); +- DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE); +- + if (!link->link_enc) { + DC_ERROR("Failed to create link encoder!\n"); + goto link_enc_create_fail; + } + ++ DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C); ++ DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE); ++ + /* Update link encoder tracking variables. These are used for the dynamic + * assignment of link encoders to streams. 
+ */ +diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c +index 1ddb4f5eac8e5..93c0455766ddb 100644 +--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c ++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c +@@ -433,17 +433,20 @@ static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp, + } + + if (status == MOD_HDCP_STATUS_SUCCESS) +- mod_hdcp_execute_and_set(mod_hdcp_read_bstatus, ++ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus, + &input->bstatus_read, &status, +- hdcp, "bstatus_read"); ++ hdcp, "bstatus_read")) ++ goto out; + if (status == MOD_HDCP_STATUS_SUCCESS) +- mod_hdcp_execute_and_set(check_link_integrity_dp, ++ if (!mod_hdcp_execute_and_set(check_link_integrity_dp, + &input->link_integrity_check, &status, +- hdcp, "link_integrity_check"); ++ hdcp, "link_integrity_check")) ++ goto out; + if (status == MOD_HDCP_STATUS_SUCCESS) +- mod_hdcp_execute_and_set(check_no_reauthentication_request_dp, ++ if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp, + &input->reauth_request_check, &status, +- hdcp, "reauth_request_check"); ++ hdcp, "reauth_request_check")) ++ goto out; + out: + return status; + } +diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +index c9a9c758531bc..c0d7fe5102e6d 100644 +--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c ++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +@@ -1883,7 +1883,8 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu, + smu_dpm_ctx->dpm_level = level; + } + +- if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { ++ if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && ++ smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { + index = fls(smu->workload_mask); + index = index > 0 && index <= WORKLOAD_POLICY_MAX ? 
index - 1 : 0; + workload[0] = smu->workload_setting[index]; +@@ -1962,7 +1963,8 @@ static int smu_switch_power_profile(void *handle, + workload[0] = smu->workload_setting[index]; + } + +- if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ++ if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && ++ smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) + smu_bump_power_profile_mode(smu, workload, 0); + + return 0; +diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c +index 0d3b22a743659..e251e061d1adb 100644 +--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c ++++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c +@@ -304,7 +304,7 @@ void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc) + { + struct intel_gt *gt = gsc_uc_to_gt(gsc); + +- if (!intel_uc_fw_is_loadable(&gsc->fw)) ++ if (!intel_uc_fw_is_loadable(&gsc->fw) || intel_uc_fw_is_in_error(&gsc->fw)) + return; + + if (intel_gsc_uc_fw_init_done(gsc)) +diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h +index 9a431726c8d5b..ac7b3aad2222e 100644 +--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h ++++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h +@@ -258,6 +258,11 @@ static inline bool intel_uc_fw_is_running(struct intel_uc_fw *uc_fw) + return __intel_uc_fw_status(uc_fw) == INTEL_UC_FIRMWARE_RUNNING; + } + ++static inline bool intel_uc_fw_is_in_error(struct intel_uc_fw *uc_fw) ++{ ++ return intel_uc_fw_status_to_error(__intel_uc_fw_status(uc_fw)) != 0; ++} ++ + static inline bool intel_uc_fw_is_overridden(const struct intel_uc_fw *uc_fw) + { + return uc_fw->user_overridden; +diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c +index 8a9aad523eec2..1d4cc91c0e40d 100644 +--- a/drivers/gpu/drm/i915/i915_sw_fence.c ++++ b/drivers/gpu/drm/i915/i915_sw_fence.c +@@ -51,7 +51,7 @@ static inline void debug_fence_init(struct i915_sw_fence *fence) + debug_object_init(fence, &i915_sw_fence_debug_descr); + } + +-static inline void debug_fence_init_onstack(struct i915_sw_fence *fence) ++static inline __maybe_unused void debug_fence_init_onstack(struct i915_sw_fence *fence) + { + debug_object_init_on_stack(fence, &i915_sw_fence_debug_descr); + } +@@ -77,7 +77,7 @@ static inline void debug_fence_destroy(struct i915_sw_fence *fence) + debug_object_destroy(fence, &i915_sw_fence_debug_descr); + } + +-static inline void debug_fence_free(struct i915_sw_fence *fence) ++static inline __maybe_unused void debug_fence_free(struct i915_sw_fence *fence) + { + debug_object_free(fence, &i915_sw_fence_debug_descr); + smp_wmb(); /* flush the change in state before reallocation */ +@@ -94,7 +94,7 @@ static inline void debug_fence_init(struct i915_sw_fence *fence) + { + } + +-static inline void debug_fence_init_onstack(struct i915_sw_fence *fence) ++static inline __maybe_unused void debug_fence_init_onstack(struct i915_sw_fence *fence) + { + } + +@@ -115,7 +115,7 @@ static inline void debug_fence_destroy(struct i915_sw_fence *fence) + { + } + +-static inline void debug_fence_free(struct i915_sw_fence *fence) ++static inline __maybe_unused void debug_fence_free(struct i915_sw_fence *fence) + { + } + +diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_hid.c b/drivers/hid/amd-sfh-hid/amd_sfh_hid.c +index 705b523370684..81f3024b7b1b5 100644 +--- a/drivers/hid/amd-sfh-hid/amd_sfh_hid.c ++++ b/drivers/hid/amd-sfh-hid/amd_sfh_hid.c +@@ -171,11 +171,13 @@ int amdtp_hid_probe(u32 cur_hid_dev, struct amdtp_cl_data *cli_data) + void 
amdtp_hid_remove(struct amdtp_cl_data *cli_data) + { + int i; ++ struct amdtp_hid_data *hid_data; + + for (i = 0; i < cli_data->num_hid_devices; ++i) { + if (cli_data->hid_sensor_hubs[i]) { +- kfree(cli_data->hid_sensor_hubs[i]->driver_data); ++ hid_data = cli_data->hid_sensor_hubs[i]->driver_data; + hid_destroy_device(cli_data->hid_sensor_hubs[i]); ++ kfree(hid_data); + cli_data->hid_sensor_hubs[i] = NULL; + } + } +diff --git a/drivers/hid/hid-cougar.c b/drivers/hid/hid-cougar.c +index cb8bd8aae15b5..0fa785f52707a 100644 +--- a/drivers/hid/hid-cougar.c ++++ b/drivers/hid/hid-cougar.c +@@ -106,7 +106,7 @@ static void cougar_fix_g6_mapping(void) + static __u8 *cougar_report_fixup(struct hid_device *hdev, __u8 *rdesc, + unsigned int *rsize) + { +- if (rdesc[2] == 0x09 && rdesc[3] == 0x02 && ++ if (*rsize >= 117 && rdesc[2] == 0x09 && rdesc[3] == 0x02 && + (rdesc[115] | rdesc[116] << 8) >= HID_MAX_USAGES) { + hid_info(hdev, + "usage count exceeds max: fixing up report descriptor\n"); +diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c +index edbb38f6956b9..756aebf324735 100644 +--- a/drivers/hv/vmbus_drv.c ++++ b/drivers/hv/vmbus_drv.c +@@ -1962,6 +1962,7 @@ void vmbus_device_unregister(struct hv_device *device_obj) + */ + device_unregister(&device_obj->device); + } ++EXPORT_SYMBOL_GPL(vmbus_device_unregister); + + #ifdef CONFIG_ACPI + /* +diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c +index 46e3c8c507657..73fd967998472 100644 +--- a/drivers/hwmon/adc128d818.c ++++ b/drivers/hwmon/adc128d818.c +@@ -176,7 +176,7 @@ static ssize_t adc128_in_store(struct device *dev, + + mutex_lock(&data->update_lock); + /* 10 mV LSB on limit registers */ +- regval = clamp_val(DIV_ROUND_CLOSEST(val, 10), 0, 255); ++ regval = DIV_ROUND_CLOSEST(clamp_val(val, 0, 2550), 10); + data->in[index][nr] = regval << 4; + reg = index == 1 ? ADC128_REG_IN_MIN(nr) : ADC128_REG_IN_MAX(nr); + i2c_smbus_write_byte_data(data->client, reg, regval); +@@ -214,7 +214,7 @@ static ssize_t adc128_temp_store(struct device *dev, + return err; + + mutex_lock(&data->update_lock); +- regval = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127); ++ regval = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000); + data->temp[index] = regval << 1; + i2c_smbus_write_byte_data(data->client, + index == 1 ? ADC128_REG_TEMP_MAX +diff --git a/drivers/hwmon/hp-wmi-sensors.c b/drivers/hwmon/hp-wmi-sensors.c +index b5325d0e72b9c..dfa1d6926deac 100644 +--- a/drivers/hwmon/hp-wmi-sensors.c ++++ b/drivers/hwmon/hp-wmi-sensors.c +@@ -1637,6 +1637,8 @@ static void hp_wmi_notify(u32 value, void *context) + goto out_unlock; + + wobj = out.pointer; ++ if (!wobj) ++ goto out_unlock; + + err = populate_event_from_wobj(dev, &event, wobj); + if (err) { +diff --git a/drivers/hwmon/lm95234.c b/drivers/hwmon/lm95234.c +index 67b9d7636ee42..37e8e9679aeb6 100644 +--- a/drivers/hwmon/lm95234.c ++++ b/drivers/hwmon/lm95234.c +@@ -301,7 +301,8 @@ static ssize_t tcrit2_store(struct device *dev, struct device_attribute *attr, + if (ret < 0) + return ret; + +- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, index ? 255 : 127); ++ val = DIV_ROUND_CLOSEST(clamp_val(val, 0, (index ? 
255 : 127) * 1000), ++ 1000); + + mutex_lock(&data->update_lock); + data->tcrit2[index] = val; +@@ -350,7 +351,7 @@ static ssize_t tcrit1_store(struct device *dev, struct device_attribute *attr, + if (ret < 0) + return ret; + +- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 255); ++ val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 255000), 1000); + + mutex_lock(&data->update_lock); + data->tcrit1[index] = val; +@@ -391,7 +392,7 @@ static ssize_t tcrit1_hyst_store(struct device *dev, + if (ret < 0) + return ret; + +- val = DIV_ROUND_CLOSEST(val, 1000); ++ val = DIV_ROUND_CLOSEST(clamp_val(val, -255000, 255000), 1000); + val = clamp_val((int)data->tcrit1[index] - val, 0, 31); + + mutex_lock(&data->update_lock); +@@ -431,7 +432,7 @@ static ssize_t offset_store(struct device *dev, struct device_attribute *attr, + return ret; + + /* Accuracy is 1/2 degrees C */ +- val = clamp_val(DIV_ROUND_CLOSEST(val, 500), -128, 127); ++ val = DIV_ROUND_CLOSEST(clamp_val(val, -64000, 63500), 500); + + mutex_lock(&data->update_lock); + data->toffset[index] = val; +diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c +index f3bf2e4701c38..8da7aa1614d7d 100644 +--- a/drivers/hwmon/nct6775-core.c ++++ b/drivers/hwmon/nct6775-core.c +@@ -2262,7 +2262,7 @@ store_temp_offset(struct device *dev, struct device_attribute *attr, + if (err < 0) + return err; + +- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127); ++ val = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000); + + mutex_lock(&data->update_lock); + data->temp_offset[nr] = val; +diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c +index fe960c0a624f7..7d7d70afde655 100644 +--- a/drivers/hwmon/w83627ehf.c ++++ b/drivers/hwmon/w83627ehf.c +@@ -895,7 +895,7 @@ store_target_temp(struct device *dev, struct device_attribute *attr, + if (err < 0) + return err; + +- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 127); ++ val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 127000), 1000); + + mutex_lock(&data->update_lock); + data->target_temp[nr] = val; +@@ -920,7 +920,7 @@ store_tolerance(struct device *dev, struct device_attribute *attr, + return err; + + /* Limit the temp to 0C - 15C */ +- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 15); ++ val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 15000), 1000); + + mutex_lock(&data->update_lock); + reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]); +diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c +index 337c95d43f3f6..edc3a69bfe31f 100644 +--- a/drivers/i3c/master/mipi-i3c-hci/dma.c ++++ b/drivers/i3c/master/mipi-i3c-hci/dma.c +@@ -291,7 +291,10 @@ static int hci_dma_init(struct i3c_hci *hci) + + rh->ibi_chunk_sz = dma_get_cache_alignment(); + rh->ibi_chunk_sz *= IBI_CHUNK_CACHELINES; +- BUG_ON(rh->ibi_chunk_sz > 256); ++ if (rh->ibi_chunk_sz > 256) { ++ ret = -EINVAL; ++ goto err_out; ++ } + + ibi_status_ring_sz = rh->ibi_status_sz * rh->ibi_status_entries; + ibi_data_ring_sz = rh->ibi_chunk_sz * rh->ibi_chunks_total; +diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c +index ccd0c4680be29..acc937275c184 100644 +--- a/drivers/i3c/master/svc-i3c-master.c ++++ b/drivers/i3c/master/svc-i3c-master.c +@@ -1037,29 +1037,59 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master, + u8 *in, const u8 *out, unsigned int xfer_len, + unsigned int *actual_len, bool continued) + { ++ int retry = 2; + u32 reg; + int ret; + + /* clean SVC_I3C_MINT_IBIWON w1c bits */ + writel(SVC_I3C_MINT_IBIWON, 
master->regs + SVC_I3C_MSTATUS); + +- writel(SVC_I3C_MCTRL_REQUEST_START_ADDR | +- xfer_type | +- SVC_I3C_MCTRL_IBIRESP_NACK | +- SVC_I3C_MCTRL_DIR(rnw) | +- SVC_I3C_MCTRL_ADDR(addr) | +- SVC_I3C_MCTRL_RDTERM(*actual_len), +- master->regs + SVC_I3C_MCTRL); + +- ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg, ++ while (retry--) { ++ writel(SVC_I3C_MCTRL_REQUEST_START_ADDR | ++ xfer_type | ++ SVC_I3C_MCTRL_IBIRESP_NACK | ++ SVC_I3C_MCTRL_DIR(rnw) | ++ SVC_I3C_MCTRL_ADDR(addr) | ++ SVC_I3C_MCTRL_RDTERM(*actual_len), ++ master->regs + SVC_I3C_MCTRL); ++ ++ ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg, + SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000); +- if (ret) +- goto emit_stop; ++ if (ret) ++ goto emit_stop; + +- if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) { +- ret = -ENXIO; +- *actual_len = 0; +- goto emit_stop; ++ if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) { ++ /* ++ * According to I3C Spec 1.1.1, 11-Jun-2021, section: 5.1.2.2.3. ++ * If the Controller chooses to start an I3C Message with an I3C Dynamic ++ * Address, then special provisions shall be made because that same I3C ++ * Target may be initiating an IBI or a Controller Role Request. So, one of ++ * three things may happen: (skip 1, 2) ++ * ++ * 3. The Addresses match and the RnW bits also match, and so neither ++ * Controller nor Target will ACK since both are expecting the other side to ++ * provide ACK. As a result, each side might think it had "won" arbitration, ++ * but neither side would continue, as each would subsequently see that the ++ * other did not provide ACK. ++ * ... ++ * For either value of RnW: Due to the NACK, the Controller shall defer the ++ * Private Write or Private Read, and should typically transmit the Target ++ * Address again after a Repeated START (i.e., the next one or any one prior ++ * to a STOP in the Frame). Since the Address Header following a Repeated ++ * START is not arbitrated, the Controller will always win (see Section ++ * 5.1.2.2.4). ++ */ ++ if (retry && addr != 0x7e) { ++ writel(SVC_I3C_MERRWARN_NACK, master->regs + SVC_I3C_MERRWARN); ++ } else { ++ ret = -ENXIO; ++ *actual_len = 0; ++ goto emit_stop; ++ } ++ } else { ++ break; ++ } + } + + /* +diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c +index b9b206fcd748f..2976c62b58c07 100644 +--- a/drivers/iio/adc/ad7124.c ++++ b/drivers/iio/adc/ad7124.c +@@ -146,15 +146,18 @@ struct ad7124_chip_info { + struct ad7124_channel_config { + bool live; + unsigned int cfg_slot; +- enum ad7124_ref_sel refsel; +- bool bipolar; +- bool buf_positive; +- bool buf_negative; +- unsigned int vref_mv; +- unsigned int pga_bits; +- unsigned int odr; +- unsigned int odr_sel_bits; +- unsigned int filter_type; ++ /* Following fields are used to compare equality. 
*/ ++ struct_group(config_props, ++ enum ad7124_ref_sel refsel; ++ bool bipolar; ++ bool buf_positive; ++ bool buf_negative; ++ unsigned int vref_mv; ++ unsigned int pga_bits; ++ unsigned int odr; ++ unsigned int odr_sel_bits; ++ unsigned int filter_type; ++ ); + }; + + struct ad7124_channel { +@@ -333,11 +336,12 @@ static struct ad7124_channel_config *ad7124_find_similar_live_cfg(struct ad7124_ + ptrdiff_t cmp_size; + int i; + +- cmp_size = (u8 *)&cfg->live - (u8 *)cfg; ++ cmp_size = sizeof_field(struct ad7124_channel_config, config_props); + for (i = 0; i < st->num_channels; i++) { + cfg_aux = &st->channels[i].cfg; + +- if (cfg_aux->live && !memcmp(cfg, cfg_aux, cmp_size)) ++ if (cfg_aux->live && ++ !memcmp(&cfg->config_props, &cfg_aux->config_props, cmp_size)) + return cfg_aux; + } + +@@ -761,6 +765,7 @@ static int ad7124_soft_reset(struct ad7124_state *st) + if (ret < 0) + return ret; + ++ fsleep(200); + timeout = 100; + do { + ret = ad_sd_read_reg(&st->sd, AD7124_STATUS, 1, &readval); +diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c +index 1928d9ae5bcff..1c08c0921ee71 100644 +--- a/drivers/iio/adc/ad7606.c ++++ b/drivers/iio/adc/ad7606.c +@@ -49,7 +49,7 @@ static const unsigned int ad7616_oversampling_avail[8] = { + 1, 2, 4, 8, 16, 32, 64, 128, + }; + +-static int ad7606_reset(struct ad7606_state *st) ++int ad7606_reset(struct ad7606_state *st) + { + if (st->gpio_reset) { + gpiod_set_value(st->gpio_reset, 1); +@@ -60,6 +60,7 @@ static int ad7606_reset(struct ad7606_state *st) + + return -ENODEV; + } ++EXPORT_SYMBOL_NS_GPL(ad7606_reset, IIO_AD7606); + + static int ad7606_reg_access(struct iio_dev *indio_dev, + unsigned int reg, +@@ -88,31 +89,6 @@ static int ad7606_read_samples(struct ad7606_state *st) + { + unsigned int num = st->chip_info->num_channels - 1; + u16 *data = st->data; +- int ret; +- +- /* +- * The frstdata signal is set to high while and after reading the sample +- * of the first channel and low for all other channels. This can be used +- * to check that the incoming data is correctly aligned. During normal +- * operation the data should never become unaligned, but some glitch or +- * electrostatic discharge might cause an extra read or clock cycle. +- * Monitoring the frstdata signal allows to recover from such failure +- * situations. 
+- */ +- +- if (st->gpio_frstdata) { +- ret = st->bops->read_block(st->dev, 1, data); +- if (ret) +- return ret; +- +- if (!gpiod_get_value(st->gpio_frstdata)) { +- ad7606_reset(st); +- return -EIO; +- } +- +- data++; +- num--; +- } + + return st->bops->read_block(st->dev, num, data); + } +diff --git a/drivers/iio/adc/ad7606.h b/drivers/iio/adc/ad7606.h +index 0c6a88cc46958..6649e84d25de6 100644 +--- a/drivers/iio/adc/ad7606.h ++++ b/drivers/iio/adc/ad7606.h +@@ -151,6 +151,8 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address, + const char *name, unsigned int id, + const struct ad7606_bus_ops *bops); + ++int ad7606_reset(struct ad7606_state *st); ++ + enum ad7606_supported_device_ids { + ID_AD7605_4, + ID_AD7606_8, +diff --git a/drivers/iio/adc/ad7606_par.c b/drivers/iio/adc/ad7606_par.c +index d8408052262e4..6bc587b20f05d 100644 +--- a/drivers/iio/adc/ad7606_par.c ++++ b/drivers/iio/adc/ad7606_par.c +@@ -7,6 +7,7 @@ + + #include + #include ++#include + #include + #include + #include +@@ -21,8 +22,29 @@ static int ad7606_par16_read_block(struct device *dev, + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct ad7606_state *st = iio_priv(indio_dev); + +- insw((unsigned long)st->base_address, buf, count); + ++ /* ++ * On the parallel interface, the frstdata signal is set to high while ++ * and after reading the sample of the first channel and low for all ++ * other channels. This can be used to check that the incoming data is ++ * correctly aligned. During normal operation the data should never ++ * become unaligned, but some glitch or electrostatic discharge might ++ * cause an extra read or clock cycle. Monitoring the frstdata signal ++ * allows to recover from such failure situations. ++ */ ++ int num = count; ++ u16 *_buf = buf; ++ ++ if (st->gpio_frstdata) { ++ insw((unsigned long)st->base_address, _buf, 1); ++ if (!gpiod_get_value(st->gpio_frstdata)) { ++ ad7606_reset(st); ++ return -EIO; ++ } ++ _buf++; ++ num--; ++ } ++ insw((unsigned long)st->base_address, _buf, num); + return 0; + } + +@@ -35,8 +57,28 @@ static int ad7606_par8_read_block(struct device *dev, + { + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct ad7606_state *st = iio_priv(indio_dev); +- +- insb((unsigned long)st->base_address, buf, count * 2); ++ /* ++ * On the parallel interface, the frstdata signal is set to high while ++ * and after reading the sample of the first channel and low for all ++ * other channels. This can be used to check that the incoming data is ++ * correctly aligned. During normal operation the data should never ++ * become unaligned, but some glitch or electrostatic discharge might ++ * cause an extra read or clock cycle. Monitoring the frstdata signal ++ * allows to recover from such failure situations. 
++ */ ++ int num = count; ++ u16 *_buf = buf; ++ ++ if (st->gpio_frstdata) { ++ insb((unsigned long)st->base_address, _buf, 2); ++ if (!gpiod_get_value(st->gpio_frstdata)) { ++ ad7606_reset(st); ++ return -EIO; ++ } ++ _buf++; ++ num--; ++ } ++ insb((unsigned long)st->base_address, _buf, num * 2); + + return 0; + } +diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c +index 0d53c0a07b0d6..db5dbd60cf675 100644 +--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c ++++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c +@@ -180,7 +180,7 @@ struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev, + + ret = dma_get_slave_caps(chan, &caps); + if (ret < 0) +- goto err_free; ++ goto err_release; + + /* Needs to be aligned to the maximum of the minimums */ + if (caps.src_addr_widths) +@@ -206,6 +206,8 @@ struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev, + + return &dmaengine_buffer->queue.buffer; + ++err_release: ++ dma_release_channel(chan); + err_free: + kfree(dmaengine_buffer); + return ERR_PTR(ret); +diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c +index b855565384757..80e1c45485c9b 100644 +--- a/drivers/iio/inkern.c ++++ b/drivers/iio/inkern.c +@@ -680,17 +680,17 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan, + break; + case IIO_VAL_INT_PLUS_MICRO: + if (scale_val2 < 0) +- *processed = -raw64 * scale_val; ++ *processed = -raw64 * scale_val * scale; + else +- *processed = raw64 * scale_val; ++ *processed = raw64 * scale_val * scale; + *processed += div_s64(raw64 * (s64)scale_val2 * scale, + 1000000LL); + break; + case IIO_VAL_INT_PLUS_NANO: + if (scale_val2 < 0) +- *processed = -raw64 * scale_val; ++ *processed = -raw64 * scale_val * scale; + else +- *processed = raw64 * scale_val; ++ *processed = raw64 * scale_val * scale; + *processed += div_s64(raw64 * (s64)scale_val2 * scale, + 1000000000LL); + break; +diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c +index d98212d55108c..2c973f15cab7d 100644 +--- a/drivers/input/misc/uinput.c ++++ b/drivers/input/misc/uinput.c +@@ -417,6 +417,20 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code, + return -EINVAL; + } + ++ /* ++ * Limit number of contacts to a reasonable value (100). This ++ * ensures that we need less than 2 pages for struct input_mt ++ * (we are not using in-kernel slot assignment so not going to ++ * allocate memory for the "red" table), and we should have no ++ * trouble getting this much memory. ++ */ ++ if (code == ABS_MT_SLOT && max > 99) { ++ printk(KERN_DEBUG ++ "%s: unreasonably large number of slots requested: %d\n", ++ UINPUT_NAME, max); ++ return -EINVAL; ++ } ++ + return 0; + } + +diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c +index ae7ba0c419f5a..6a77babcf7228 100644 +--- a/drivers/input/touchscreen/ili210x.c ++++ b/drivers/input/touchscreen/ili210x.c +@@ -597,7 +597,7 @@ static int ili251x_firmware_to_buffer(const struct firmware *fw, + * once, copy them all into this buffer at the right locations, and then + * do all operations on this linear buffer. 
+ */ +- fw_buf = kzalloc(SZ_64K, GFP_KERNEL); ++ fw_buf = kvmalloc(SZ_64K, GFP_KERNEL); + if (!fw_buf) + return -ENOMEM; + +@@ -627,7 +627,7 @@ static int ili251x_firmware_to_buffer(const struct firmware *fw, + return 0; + + err_big: +- kfree(fw_buf); ++ kvfree(fw_buf); + return error; + } + +@@ -870,7 +870,7 @@ static ssize_t ili210x_firmware_update_store(struct device *dev, + ili210x_hardware_reset(priv->reset_gpio); + dev_dbg(dev, "Firmware update ended, error=%i\n", error); + enable_irq(client->irq); +- kfree(fwbuf); ++ kvfree(fwbuf); + return error; + } + +diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c +index 23cb80d62a9ab..84f0459e503cf 100644 +--- a/drivers/iommu/intel/dmar.c ++++ b/drivers/iommu/intel/dmar.c +@@ -1422,7 +1422,7 @@ int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc, + */ + writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG); + +- while (qi->desc_status[wait_index] != QI_DONE) { ++ while (READ_ONCE(qi->desc_status[wait_index]) != QI_DONE) { + /* + * We will leave the interrupts disabled, to prevent interrupt + * context to queue another cmd while a cmd is already submitted +diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c +index 74c5cb93e9002..94bd7f25f6f26 100644 +--- a/drivers/iommu/sun50i-iommu.c ++++ b/drivers/iommu/sun50i-iommu.c +@@ -449,6 +449,7 @@ static int sun50i_iommu_enable(struct sun50i_iommu *iommu) + IOMMU_TLB_PREFETCH_MASTER_ENABLE(3) | + IOMMU_TLB_PREFETCH_MASTER_ENABLE(4) | + IOMMU_TLB_PREFETCH_MASTER_ENABLE(5)); ++ iommu_write(iommu, IOMMU_BYPASS_REG, 0); + iommu_write(iommu, IOMMU_INT_ENABLE_REG, IOMMU_INT_MASK); + iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_NONE), + IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) | +diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c +index a55528469278c..91a42e2d7a131 100644 +--- a/drivers/irqchip/irq-armada-370-xp.c ++++ b/drivers/irqchip/irq-armada-370-xp.c +@@ -566,6 +566,10 @@ static struct irq_chip armada_370_xp_irq_chip = { + static int armada_370_xp_mpic_irq_map(struct irq_domain *h, + unsigned int virq, irq_hw_number_t hw) + { ++ /* IRQs 0 and 1 cannot be mapped, they are handled internally */ ++ if (hw <= 1) ++ return -EINVAL; ++ + armada_370_xp_irq_mask(irq_get_irq_data(virq)); + if (!is_percpu_irq(hw)) + writel(hw, per_cpu_int_base + +diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c +index f2ff4387870d6..d83c2c85962c3 100644 +--- a/drivers/irqchip/irq-gic-v2m.c ++++ b/drivers/irqchip/irq-gic-v2m.c +@@ -438,12 +438,12 @@ static int __init gicv2m_of_init(struct fwnode_handle *parent_handle, + + ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis, + &res, 0); +- if (ret) { +- of_node_put(child); ++ if (ret) + break; +- } + } + ++ if (ret && child) ++ of_node_put(child); + if (!ret) + ret = gicv2m_allocate_domains(parent); + if (ret) +diff --git a/drivers/leds/leds-spi-byte.c b/drivers/leds/leds-spi-byte.c +index 9d91f21842f2b..afe9bff7c7c16 100644 +--- a/drivers/leds/leds-spi-byte.c ++++ b/drivers/leds/leds-spi-byte.c +@@ -91,7 +91,6 @@ static int spi_byte_probe(struct spi_device *spi) + dev_err(dev, "Device must have exactly one LED sub-node."); + return -EINVAL; + } +- child = of_get_next_available_child(dev_of_node(dev), NULL); + + led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL); + if (!led) +@@ -107,11 +106,13 @@ static int spi_byte_probe(struct spi_device *spi) + led->ldev.max_brightness = led->cdef->max_value - led->cdef->off_value; + 
led->ldev.brightness_set_blocking = spi_byte_brightness_set_blocking; + ++ child = of_get_next_available_child(dev_of_node(dev), NULL); + state = of_get_property(child, "default-state", NULL); + if (state) { + if (!strcmp(state, "on")) { + led->ldev.brightness = led->ldev.max_brightness; + } else if (strcmp(state, "off")) { ++ of_node_put(child); + /* all other cases except "off" */ + dev_err(dev, "default-state can only be 'on' or 'off'"); + return -EINVAL; +@@ -122,9 +123,12 @@ static int spi_byte_probe(struct spi_device *spi) + + ret = devm_led_classdev_register(&spi->dev, &led->ldev); + if (ret) { ++ of_node_put(child); + mutex_destroy(&led->mutex); + return ret; + } ++ ++ of_node_put(child); + spi_set_drvdata(spi, led); + + return 0; +diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c +index 2a71bcdba92d1..b37bbe7625003 100644 +--- a/drivers/md/dm-init.c ++++ b/drivers/md/dm-init.c +@@ -212,8 +212,10 @@ static char __init *dm_parse_device_entry(struct dm_device *dev, char *str) + strscpy(dev->dmi.uuid, field[1], sizeof(dev->dmi.uuid)); + /* minor */ + if (strlen(field[2])) { +- if (kstrtoull(field[2], 0, &dev->dmi.dev)) ++ if (kstrtoull(field[2], 0, &dev->dmi.dev) || ++ dev->dmi.dev >= (1 << MINORBITS)) + return ERR_PTR(-EINVAL); ++ dev->dmi.dev = huge_encode_dev((dev_t)dev->dmi.dev); + dev->dmi.flags |= DM_PERSISTENT_DEV_FLAG; + } + /* flags */ +diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c +index c6df862c79e39..8fa6750b5b421 100644 +--- a/drivers/media/platform/qcom/camss/camss.c ++++ b/drivers/media/platform/qcom/camss/camss.c +@@ -1038,8 +1038,11 @@ static int camss_of_parse_endpoint_node(struct device *dev, + struct v4l2_mbus_config_mipi_csi2 *mipi_csi2; + struct v4l2_fwnode_endpoint vep = { { 0 } }; + unsigned int i; ++ int ret; + +- v4l2_fwnode_endpoint_parse(of_fwnode_handle(node), &vep); ++ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(node), &vep); ++ if (ret) ++ return ret; + + csd->interface.csiphy_id = vep.base.port; + +diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c +index 3a06df35a2d7c..99325bfed6431 100644 +--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c ++++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c +@@ -106,8 +106,9 @@ static int vid_cap_queue_setup(struct vb2_queue *vq, + if (*nplanes != buffers) + return -EINVAL; + for (p = 0; p < buffers; p++) { +- if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h + +- dev->fmt_cap->data_offset[p]) ++ if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h / ++ dev->fmt_cap->vdownsampling[p] + ++ dev->fmt_cap->data_offset[p]) + return -EINVAL; + } + } else { +@@ -1556,8 +1557,10 @@ int vidioc_s_edid(struct file *file, void *_fh, + return -EINVAL; + if (edid->blocks == 0) { + dev->edid_blocks = 0; +- v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, 0); +- v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, 0); ++ if (dev->num_outputs) { ++ v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, 0); ++ v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, 0); ++ } + phys_addr = CEC_PHYS_ADDR_INVALID; + goto set_phys_addr; + } +@@ -1581,8 +1584,10 @@ int vidioc_s_edid(struct file *file, void *_fh, + display_present |= + dev->display_present[i] << j++; + +- v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, display_present); +- v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, display_present); ++ if (dev->num_outputs) { ++ v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, display_present); ++ v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, display_present); ++ } + + 
set_phys_addr: + /* TODO: a proper hotplug detect cycle should be emulated here */ +diff --git a/drivers/media/test-drivers/vivid/vivid-vid-out.c b/drivers/media/test-drivers/vivid/vivid-vid-out.c +index 184a6df2c29fe..d05f547a587cd 100644 +--- a/drivers/media/test-drivers/vivid/vivid-vid-out.c ++++ b/drivers/media/test-drivers/vivid/vivid-vid-out.c +@@ -63,14 +63,16 @@ static int vid_out_queue_setup(struct vb2_queue *vq, + if (sizes[0] < size) + return -EINVAL; + for (p = 1; p < planes; p++) { +- if (sizes[p] < dev->bytesperline_out[p] * h + +- vfmt->data_offset[p]) ++ if (sizes[p] < dev->bytesperline_out[p] * h / ++ vfmt->vdownsampling[p] + ++ vfmt->data_offset[p]) + return -EINVAL; + } + } else { + for (p = 0; p < planes; p++) +- sizes[p] = p ? dev->bytesperline_out[p] * h + +- vfmt->data_offset[p] : size; ++ sizes[p] = p ? dev->bytesperline_out[p] * h / ++ vfmt->vdownsampling[p] + ++ vfmt->data_offset[p] : size; + } + + if (vq->num_buffers + *nbuffers < 2) +@@ -127,7 +129,7 @@ static int vid_out_buf_prepare(struct vb2_buffer *vb) + + for (p = 0; p < planes; p++) { + if (p) +- size = dev->bytesperline_out[p] * h; ++ size = dev->bytesperline_out[p] * h / vfmt->vdownsampling[p]; + size += vb->planes[p].data_offset; + + if (vb2_get_plane_payload(vb, p) < size) { +@@ -334,8 +336,8 @@ int vivid_g_fmt_vid_out(struct file *file, void *priv, + for (p = 0; p < mp->num_planes; p++) { + mp->plane_fmt[p].bytesperline = dev->bytesperline_out[p]; + mp->plane_fmt[p].sizeimage = +- mp->plane_fmt[p].bytesperline * mp->height + +- fmt->data_offset[p]; ++ mp->plane_fmt[p].bytesperline * mp->height / ++ fmt->vdownsampling[p] + fmt->data_offset[p]; + } + for (p = fmt->buffers; p < fmt->planes; p++) { + unsigned stride = dev->bytesperline_out[p]; +diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c +index f0626814d56b9..4df0d7a0cd118 100644 +--- a/drivers/misc/fastrpc.c ++++ b/drivers/misc/fastrpc.c +@@ -1912,7 +1912,8 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp) + &args[0]); + if (err) { + dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size); +- goto err_invoke; ++ fastrpc_buf_free(buf); ++ return err; + } + + /* update the buffer to be able to deallocate the memory on the DSP */ +@@ -1950,8 +1951,6 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp) + + err_assign: + fastrpc_req_munmap_impl(fl, buf); +-err_invoke: +- fastrpc_buf_free(buf); + + return err; + } +diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c +index 692daa9eff341..19c9d2cdd277b 100644 +--- a/drivers/misc/vmw_vmci/vmci_resource.c ++++ b/drivers/misc/vmw_vmci/vmci_resource.c +@@ -144,7 +144,8 @@ void vmci_resource_remove(struct vmci_resource *resource) + spin_lock(&vmci_resource_table.lock); + + hlist_for_each_entry(r, &vmci_resource_table.entries[idx], node) { +- if (vmci_handle_is_equal(r->handle, resource->handle)) { ++ if (vmci_handle_is_equal(r->handle, resource->handle) && ++ resource->type == r->type) { + hlist_del_init_rcu(&r->node); + break; + } +diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h +index cca71867bc4ad..92905fc46436d 100644 +--- a/drivers/mmc/core/quirks.h ++++ b/drivers/mmc/core/quirks.h +@@ -15,6 +15,19 @@ + + #include "card.h" + ++static const struct mmc_fixup __maybe_unused mmc_sd_fixups[] = { ++ /* ++ * Kingston Canvas Go! Plus microSD cards never finish SD cache flush. ++ * This has so far only been observed on cards from 11/2019, while new ++ * cards from 2023/05 do not exhibit this behavior. 
++ */ ++ _FIXUP_EXT("SD64G", CID_MANFID_KINGSTON_SD, 0x5449, 2019, 11, ++ 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd, ++ MMC_QUIRK_BROKEN_SD_CACHE, EXT_CSD_REV_ANY), ++ ++ END_FIXUP ++}; ++ + static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = { + #define INAND_CMD38_ARG_EXT_CSD 113 + #define INAND_CMD38_ARG_ERASE 0x00 +@@ -53,15 +66,6 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = { + MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_BLK_NO_CMD23), + +- /* +- * Kingston Canvas Go! Plus microSD cards never finish SD cache flush. +- * This has so far only been observed on cards from 11/2019, while new +- * cards from 2023/05 do not exhibit this behavior. +- */ +- _FIXUP_EXT("SD64G", CID_MANFID_KINGSTON_SD, 0x5449, 2019, 11, +- 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd, +- MMC_QUIRK_BROKEN_SD_CACHE, EXT_CSD_REV_ANY), +- + /* + * Some SD cards lockup while using CMD23 multiblock transfers. + */ +diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c +index c3e554344c99f..240469a881a27 100644 +--- a/drivers/mmc/core/sd.c ++++ b/drivers/mmc/core/sd.c +@@ -26,6 +26,7 @@ + #include "host.h" + #include "bus.h" + #include "mmc_ops.h" ++#include "quirks.h" + #include "sd.h" + #include "sd_ops.h" + +@@ -1475,6 +1476,9 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, + goto free_card; + } + ++ /* Apply quirks prior to card setup */ ++ mmc_fixup_device(card, mmc_sd_fixups); ++ + err = mmc_sd_setup_card(host, card, oldcard != NULL); + if (err) + goto free_card; +diff --git a/drivers/mmc/host/cqhci-core.c b/drivers/mmc/host/cqhci-core.c +index 41e94cd141098..fe7a4eac9595c 100644 +--- a/drivers/mmc/host/cqhci-core.c ++++ b/drivers/mmc/host/cqhci-core.c +@@ -612,7 +612,7 @@ static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq) + cqhci_writel(cq_host, 0, CQHCI_CTL); + mmc->cqe_on = true; + pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc)); +- if (cqhci_readl(cq_host, CQHCI_CTL) && CQHCI_HALT) { ++ if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) { + pr_err("%s: cqhci: CQE failed to exit halt state\n", + mmc_hostname(mmc)); + } +diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c +index 02bee7afab37e..2f0bc79ef856a 100644 +--- a/drivers/mmc/host/dw_mmc.c ++++ b/drivers/mmc/host/dw_mmc.c +@@ -2952,8 +2952,8 @@ static int dw_mci_init_slot(struct dw_mci *host) + if (host->use_dma == TRANS_MODE_IDMAC) { + mmc->max_segs = host->ring_size; + mmc->max_blk_size = 65535; +- mmc->max_seg_size = 0x1000; +- mmc->max_req_size = mmc->max_seg_size * host->ring_size; ++ mmc->max_req_size = DW_MCI_DESC_DATA_LENGTH * host->ring_size; ++ mmc->max_seg_size = mmc->max_req_size; + mmc->max_blk_count = mmc->max_req_size / 512; + } else if (host->use_dma == TRANS_MODE_EDMAC) { + mmc->max_segs = 64; +diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c +index 42d54532cabe6..8379a0620c8fe 100644 +--- a/drivers/mmc/host/sdhci-of-aspeed.c ++++ b/drivers/mmc/host/sdhci-of-aspeed.c +@@ -510,6 +510,7 @@ static const struct of_device_id aspeed_sdhci_of_match[] = { + { .compatible = "aspeed,ast2600-sdhci", .data = &ast2600_sdhci_pdata, }, + { } + }; ++MODULE_DEVICE_TABLE(of, aspeed_sdhci_of_match); + + static struct platform_driver aspeed_sdhci_driver = { + .driver = { +diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c +index 683203f87ae2b..277493e41b072 100644 +--- a/drivers/net/bareudp.c ++++ b/drivers/net/bareudp.c +@@ -82,7 +82,7 @@ static int 
bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) + + if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion, + sizeof(ipversion))) { +- bareudp->dev->stats.rx_dropped++; ++ DEV_STATS_INC(bareudp->dev, rx_dropped); + goto drop; + } + ipversion >>= 4; +@@ -92,7 +92,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) + } else if (ipversion == 6 && bareudp->multi_proto_mode) { + proto = htons(ETH_P_IPV6); + } else { +- bareudp->dev->stats.rx_dropped++; ++ DEV_STATS_INC(bareudp->dev, rx_dropped); + goto drop; + } + } else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) { +@@ -106,7 +106,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) + ipv4_is_multicast(tunnel_hdr->daddr)) { + proto = htons(ETH_P_MPLS_MC); + } else { +- bareudp->dev->stats.rx_dropped++; ++ DEV_STATS_INC(bareudp->dev, rx_dropped); + goto drop; + } + } else { +@@ -122,7 +122,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) + (addr_type & IPV6_ADDR_MULTICAST)) { + proto = htons(ETH_P_MPLS_MC); + } else { +- bareudp->dev->stats.rx_dropped++; ++ DEV_STATS_INC(bareudp->dev, rx_dropped); + goto drop; + } + } +@@ -134,12 +134,12 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) + proto, + !net_eq(bareudp->net, + dev_net(bareudp->dev)))) { +- bareudp->dev->stats.rx_dropped++; ++ DEV_STATS_INC(bareudp->dev, rx_dropped); + goto drop; + } + tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0); + if (!tun_dst) { +- bareudp->dev->stats.rx_dropped++; ++ DEV_STATS_INC(bareudp->dev, rx_dropped); + goto drop; + } + skb_dst_set(skb, &tun_dst->dst); +@@ -165,8 +165,8 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) + &((struct ipv6hdr *)oiph)->saddr); + } + if (err > 1) { +- ++bareudp->dev->stats.rx_frame_errors; +- ++bareudp->dev->stats.rx_errors; ++ DEV_STATS_INC(bareudp->dev, rx_frame_errors); ++ DEV_STATS_INC(bareudp->dev, rx_errors); + goto drop; + } + } +@@ -462,11 +462,11 @@ static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev) + dev_kfree_skb(skb); + + if (err == -ELOOP) +- dev->stats.collisions++; ++ DEV_STATS_INC(dev, collisions); + else if (err == -ENETUNREACH) +- dev->stats.tx_carrier_errors++; ++ DEV_STATS_INC(dev, tx_carrier_errors); + +- dev->stats.tx_errors++; ++ DEV_STATS_INC(dev, tx_errors); + return NETDEV_TX_OK; + } + +diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c +index a57005faa04f5..c490b4ba065ba 100644 +--- a/drivers/net/can/kvaser_pciefd.c ++++ b/drivers/net/can/kvaser_pciefd.c +@@ -1580,23 +1580,15 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf) + return res; + } + +-static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie) ++static u32 kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie) + { + u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); + +- if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) { ++ if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) + kvaser_pciefd_read_buffer(pcie, 0); +- /* Reset DMA buffer 0 */ +- iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, +- KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); +- } + +- if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) { ++ if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) + kvaser_pciefd_read_buffer(pcie, 1); +- /* Reset DMA buffer 1 */ +- iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, +- KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); +- } + + if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 || + irq & KVASER_PCIEFD_SRB_IRQ_DOF1 || +@@ -1605,6 +1597,7 
@@ static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
+ dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);
+ 
+ iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
++ return irq;
+ }
+ 
+ static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
+@@ -1631,27 +1624,31 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
+ {
+ struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
+ const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
+- u32 board_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
++ u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
++ u32 srb_irq = 0;
++ u32 srb_release = 0;
+ int i;
+ 
+- if (!(board_irq & irq_mask->all))
++ if (!(pci_irq & irq_mask->all))
+ return IRQ_NONE;
+ 
+- if (board_irq & irq_mask->kcan_rx0)
+- kvaser_pciefd_receive_irq(pcie);
++ if (pci_irq & irq_mask->kcan_rx0)
++ srb_irq = kvaser_pciefd_receive_irq(pcie);
+ 
+ for (i = 0; i < pcie->nr_channels; i++) {
+- if (!pcie->can[i]) {
+- dev_err(&pcie->pci->dev,
+- "IRQ mask points to unallocated controller\n");
+- break;
+- }
+-
+- /* Check that mask matches channel (i) IRQ mask */
+- if (board_irq & irq_mask->kcan_tx[i])
++ if (pci_irq & irq_mask->kcan_tx[i])
+ kvaser_pciefd_transmit_irq(pcie->can[i]);
+ }
+ 
++ if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
++ srb_release |= KVASER_PCIEFD_SRB_CMD_RDB0;
++
++ if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
++ srb_release |= KVASER_PCIEFD_SRB_CMD_RDB1;
++
++ if (srb_release)
++ iowrite32(srb_release, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
++
+ return IRQ_HANDLED;
+ }
+ 
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index 2395b1225cc8a..fb77fd74de27f 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -1871,7 +1871,7 @@ static int m_can_open(struct net_device *dev)
+ /* start the m_can controller */
+ err = m_can_start(dev);
+ if (err)
+- goto exit_irq_fail;
++ goto exit_start_fail;
+ 
+ if (!cdev->is_peripheral)
+ napi_enable(&cdev->napi);
+@@ -1880,6 +1880,9 @@ static int m_can_open(struct net_device *dev)
+ 
+ return 0;
+ 
++exit_start_fail:
++ if (cdev->is_peripheral || dev->irq)
++ free_irq(dev->irq, dev);
+ exit_irq_fail:
+ if (cdev->is_peripheral)
+ destroy_workqueue(cdev->tx_wq);
+diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
+index 79c4bab5f7246..8c56f85e87c1a 100644
+--- a/drivers/net/can/spi/mcp251x.c
++++ b/drivers/net/can/spi/mcp251x.c
+@@ -753,7 +753,7 @@ static int mcp251x_hw_wake(struct spi_device *spi)
+ int ret;
+ 
+ /* Force wakeup interrupt to wake device, but don't execute IST */
+- disable_irq(spi->irq);
++ disable_irq_nosync(spi->irq);
+ mcp251x_write_2regs(spi, CANINTE, CANINTE_WAKIE, CANINTF_WAKIF);
+ 
+ /* Wait for oscillator startup timer after wake up */
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+index 1665f78abb5c9..a9bafa96e2f92 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+@@ -2,7 +2,7 @@
+ //
+ // mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+ //
+-// Copyright (c) 2019, 2020, 2021 Pengutronix,
++// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix,
+ // Marc Kleine-Budde <kernel@pengutronix.de>
+ //
+ // Based on:
+@@ -867,18 +867,18 @@ static int mcp251xfd_get_berr_counter(const struct net_device *ndev,
+ 
+ static struct sk_buff *
+ mcp251xfd_alloc_can_err_skb(struct mcp251xfd_priv *priv,
+- struct
can_frame **cf, u32 *timestamp) ++ struct can_frame **cf, u32 *ts_raw) + { + struct sk_buff *skb; + int err; + +- err = mcp251xfd_get_timestamp(priv, timestamp); ++ err = mcp251xfd_get_timestamp_raw(priv, ts_raw); + if (err) + return NULL; + + skb = alloc_can_err_skb(priv->ndev, cf); + if (skb) +- mcp251xfd_skb_set_timestamp(priv, skb, *timestamp); ++ mcp251xfd_skb_set_timestamp_raw(priv, skb, *ts_raw); + + return skb; + } +@@ -889,7 +889,7 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv) + struct mcp251xfd_rx_ring *ring; + struct sk_buff *skb; + struct can_frame *cf; +- u32 timestamp, rxovif; ++ u32 ts_raw, rxovif; + int err, i; + + stats->rx_over_errors++; +@@ -924,14 +924,14 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv) + return err; + } + +- skb = mcp251xfd_alloc_can_err_skb(priv, &cf, ×tamp); ++ skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &ts_raw); + if (!skb) + return 0; + + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + +- err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); ++ err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw); + if (err) + stats->rx_fifo_errors++; + +@@ -948,12 +948,12 @@ static int mcp251xfd_handle_txatif(struct mcp251xfd_priv *priv) + static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv) + { + struct net_device_stats *stats = &priv->ndev->stats; +- u32 bdiag1, timestamp; ++ u32 bdiag1, ts_raw; + struct sk_buff *skb; + struct can_frame *cf = NULL; + int err; + +- err = mcp251xfd_get_timestamp(priv, ×tamp); ++ err = mcp251xfd_get_timestamp_raw(priv, &ts_raw); + if (err) + return err; + +@@ -1035,8 +1035,8 @@ static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv) + if (!cf) + return 0; + +- mcp251xfd_skb_set_timestamp(priv, skb, timestamp); +- err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); ++ mcp251xfd_skb_set_timestamp_raw(priv, skb, ts_raw); ++ err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw); + if (err) + stats->rx_fifo_errors++; + +@@ -1049,7 +1049,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv) + struct sk_buff *skb; + struct can_frame *cf = NULL; + enum can_state new_state, rx_state, tx_state; +- u32 trec, timestamp; ++ u32 trec, ts_raw; + int err; + + err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec); +@@ -1079,7 +1079,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv) + /* The skb allocation might fail, but can_change_state() + * handles cf == NULL. 
+ */
+- skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
++ skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &ts_raw);
+ can_change_state(priv->ndev, cf, tx_state, rx_state);
+ 
+ if (new_state == CAN_STATE_BUS_OFF) {
+@@ -1110,7 +1110,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
+ cf->data[7] = bec.rxerr;
+ }
+ 
+- err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
++ err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw);
+ if (err)
+ stats->rx_fifo_errors++;
+ 
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
+index 9e8e82cdba461..61b0d6fa52dd8 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
+@@ -97,7 +97,16 @@ void can_ram_get_layout(struct can_ram_layout *layout,
+ if (ring) {
+ u8 num_rx_coalesce = 0, num_tx_coalesce = 0;
+ 
+- num_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, ring->rx_pending);
++ /* If the ring parameters have been configured in
++ * CAN-CC mode, but we are in CAN-FD mode now,
++ * they might be too big. Use the default CAN-FD
++ * values in this case.
++ */
++ num_rx = ring->rx_pending;
++ if (num_rx > layout->max_rx)
++ num_rx = layout->default_rx;
++
++ num_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, num_rx);
+ 
+ /* The ethtool doc says:
+ * To disable coalescing, set usecs = 0 and max_frames = 1.
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
+index 4cb79a4f24612..f72582d4d3e8e 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
+@@ -206,6 +206,7 @@ mcp251xfd_ring_init_rx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr)
+ int i, j;
+ 
+ mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
++ rx_ring->last_valid = timecounter_read(&priv->tc);
+ rx_ring->head = 0;
+ rx_ring->tail = 0;
+ rx_ring->base = *base;
+@@ -468,11 +469,25 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
+ 
+ /* switching from CAN-2.0 to CAN-FD mode or vice versa */
+ if (fd_mode != test_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags)) {
++ const struct ethtool_ringparam ring = {
++ .rx_pending = priv->rx_obj_num,
++ .tx_pending = priv->tx->obj_num,
++ };
++ const struct ethtool_coalesce ec = {
++ .rx_coalesce_usecs_irq = priv->rx_coalesce_usecs_irq,
++ .rx_max_coalesced_frames_irq = priv->rx_obj_num_coalesce_irq,
++ .tx_coalesce_usecs_irq = priv->tx_coalesce_usecs_irq,
++ .tx_max_coalesced_frames_irq = priv->tx_obj_num_coalesce_irq,
++ };
+ struct can_ram_layout layout;
+ 
+- can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, fd_mode);
+- priv->rx_obj_num = layout.default_rx;
+- tx_ring->obj_num = layout.default_tx;
++ can_ram_get_layout(&layout, &mcp251xfd_ram_config, &ring, &ec, fd_mode);
++
++ priv->rx_obj_num = layout.cur_rx;
++ priv->rx_obj_num_coalesce_irq = layout.rx_coalesce;
++
++ tx_ring->obj_num = layout.cur_tx;
++ priv->tx_obj_num_coalesce_irq = layout.tx_coalesce;
+ }
+ 
+ if (fd_mode) {
+@@ -509,6 +524,8 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
+ }
+ 
+ rx_ring->obj_num = rx_obj_num;
++ rx_ring->obj_num_shift_to_u8 = BITS_PER_TYPE(rx_ring->obj_num_shift_to_u8) -
++ ilog2(rx_obj_num);
+ rx_ring->obj_size = rx_obj_size;
+ priv->rx[i] = rx_ring;
+ }
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
+index ced8d9c81f8c6..fe897f3e4c12a 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c ++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c +@@ -2,7 +2,7 @@ + // + // mcp251xfd - Microchip MCP251xFD Family CAN controller driver + // +-// Copyright (c) 2019, 2020, 2021 Pengutronix, ++// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix, + // Marc Kleine-Budde + // + // Based on: +@@ -16,23 +16,14 @@ + + #include "mcp251xfd.h" + +-static inline int +-mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv, +- const struct mcp251xfd_rx_ring *ring, +- u8 *rx_head, bool *fifo_empty) ++static inline bool mcp251xfd_rx_fifo_sta_empty(const u32 fifo_sta) + { +- u32 fifo_sta; +- int err; +- +- err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr), +- &fifo_sta); +- if (err) +- return err; +- +- *rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta); +- *fifo_empty = !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF); ++ return !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF); ++} + +- return 0; ++static inline bool mcp251xfd_rx_fifo_sta_full(const u32 fifo_sta) ++{ ++ return fifo_sta & MCP251XFD_REG_FIFOSTA_TFERFFIF; + } + + static inline int +@@ -80,29 +71,49 @@ mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv, + } + + static int +-mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv, +- struct mcp251xfd_rx_ring *ring) ++mcp251xfd_get_rx_len(const struct mcp251xfd_priv *priv, ++ const struct mcp251xfd_rx_ring *ring, ++ u8 *len_p) + { +- u32 new_head; +- u8 chip_rx_head; +- bool fifo_empty; ++ const u8 shift = ring->obj_num_shift_to_u8; ++ u8 chip_head, tail, len; ++ u32 fifo_sta; + int err; + +- err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head, +- &fifo_empty); +- if (err || fifo_empty) ++ err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr), ++ &fifo_sta); ++ if (err) ++ return err; ++ ++ if (mcp251xfd_rx_fifo_sta_empty(fifo_sta)) { ++ *len_p = 0; ++ return 0; ++ } ++ ++ if (mcp251xfd_rx_fifo_sta_full(fifo_sta)) { ++ *len_p = ring->obj_num; ++ return 0; ++ } ++ ++ chip_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta); ++ ++ err = mcp251xfd_check_rx_tail(priv, ring); ++ if (err) + return err; ++ tail = mcp251xfd_get_rx_tail(ring); + +- /* chip_rx_head, is the next RX-Object filled by the HW. +- * The new RX head must be >= the old head. ++ /* First shift to full u8. The subtraction works on signed ++ * values, that keeps the difference steady around the u8 ++ * overflow. The right shift acts on len, which is an u8. + */ +- new_head = round_down(ring->head, ring->obj_num) + chip_rx_head; +- if (new_head <= ring->head) +- new_head += ring->obj_num; ++ BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(chip_head)); ++ BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(tail)); ++ BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(len)); + +- ring->head = new_head; ++ len = (chip_head << shift) - (tail << shift); ++ *len_p = len >> shift; + +- return mcp251xfd_check_rx_tail(priv, ring); ++ return 0; + } + + static void +@@ -148,8 +159,6 @@ mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv, + + if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)) + memcpy(cfd->data, hw_rx_obj->data, cfd->len); +- +- mcp251xfd_skb_set_timestamp(priv, skb, hw_rx_obj->ts); + } + + static int +@@ -160,8 +169,26 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv, + struct net_device_stats *stats = &priv->ndev->stats; + struct sk_buff *skb; + struct canfd_frame *cfd; ++ u64 timestamp; + int err; + ++ /* According to mcp2518fd erratum DS80000789E 6. 
the FIFOCI
++ * bits of a FIFOSTA register, i.e. the RX FIFO head index,
++ * might be corrupted and we might process past the RX FIFO's
++ * head into old CAN frames.
++ *
++ * Compare the timestamp of the currently processed CAN frame
++ * with that of the last valid frame received. Abort with
++ * -EBADMSG if an old CAN frame is detected.
++ */
++ timestamp = timecounter_cyc2time(&priv->tc, hw_rx_obj->ts);
++ if (timestamp <= ring->last_valid) {
++ stats->rx_fifo_errors++;
++
++ return -EBADMSG;
++ }
++ ring->last_valid = timestamp;
++
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF)
+ skb = alloc_canfd_skb(priv->ndev, &cfd);
+ else
+@@ -172,6 +199,7 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
+ return 0;
+ }
+
++ mcp251xfd_skb_set_timestamp(skb, timestamp);
+ mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, hw_rx_obj->ts);
+ if (err)
+@@ -197,52 +225,81 @@ mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv,
+ return err;
+ }
+
++static int
++mcp251xfd_handle_rxif_ring_uinc(const struct mcp251xfd_priv *priv,
++ struct mcp251xfd_rx_ring *ring,
++ u8 len)
++{
++ int offset;
++ int err;
++
++ if (!len)
++ return 0;
++
++ ring->head += len;
++
++ /* Increment the RX FIFO tail pointer 'len' times in a
++ * single SPI message.
++ *
++ * Note:
++ * Calculate offset, so that the SPI transfer ends on
++ * the last message of the uinc_xfer array, which has
++ * "cs_change == 0", to properly deactivate the chip
++ * select.
++ */
++ offset = ARRAY_SIZE(ring->uinc_xfer) - len;
++ err = spi_sync_transfer(priv->spi,
++ ring->uinc_xfer + offset, len);
++ if (err)
++ return err;
++
++ ring->tail += len;
++
++ return 0;
++}
++
+ static int
+ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
+ struct mcp251xfd_rx_ring *ring)
+ {
+ struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj;
+- u8 rx_tail, len;
++ u8 rx_tail, len, l;
+ int err, i;
+
+- err = mcp251xfd_rx_ring_update(priv, ring);
++ err = mcp251xfd_get_rx_len(priv, ring, &len);
+ if (err)
+ return err;
+
+- while ((len = mcp251xfd_get_rx_linear_len(ring))) {
+- int offset;
+-
++ while ((l = mcp251xfd_get_rx_linear_len(ring, len))) {
+ rx_tail = mcp251xfd_get_rx_tail(ring);
+
+ err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj,
+- rx_tail, len);
++ rx_tail, l);
+ if (err)
+ return err;
+
+- for (i = 0; i < len; i++) {
++ for (i = 0; i < l; i++) {
+ err = mcp251xfd_handle_rxif_one(priv, ring,
+ (void *)hw_rx_obj +
+ i * ring->obj_size);
+- if (err)
++
++ /* -EBADMSG means we're affected by mcp2518fd
++ * erratum DS80000789E 6., i.e. the timestamp
++ * in the RX object is older than the last
++ * valid received CAN frame. Don't process any
++ * further and mark processed frames as good.
++ */
++ if (err == -EBADMSG)
++ return mcp251xfd_handle_rxif_ring_uinc(priv, ring, i);
++ else if (err)
+ return err;
+ }
+
+- /* Increment the RX FIFO tail pointer 'len' times in a
+- * single SPI message.
+- *
+- * Note:
+- * Calculate offset, so that the SPI transfer ends on
+- * the last message of the uinc_xfer array, which has
+- * "cs_change == 0", to properly deactivate the chip
+- * select. 
+- */
+- offset = ARRAY_SIZE(ring->uinc_xfer) - len;
+- err = spi_sync_transfer(priv->spi,
+- ring->uinc_xfer + offset, len);
++ err = mcp251xfd_handle_rxif_ring_uinc(priv, ring, l);
+ if (err)
+ return err;
+
+- ring->tail += len;
++ len -= l;
+ }
+
+ return 0;
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
+index 5b0c7890d4b44..3886476a8f8ef 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
+@@ -97,7 +97,7 @@ mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
+ tef_tail = mcp251xfd_get_tef_tail(priv);
+ skb = priv->can.echo_skb[tef_tail];
+ if (skb)
+- mcp251xfd_skb_set_timestamp(priv, skb, hw_tef_obj->ts);
++ mcp251xfd_skb_set_timestamp_raw(priv, skb, hw_tef_obj->ts);
+ stats->tx_bytes +=
+ can_rx_offload_get_echo_skb_queue_timestamp(&priv->offload,
+ tef_tail, hw_tef_obj->ts,
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
+index 712e091869870..1db99aabe85c5 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
+@@ -2,7 +2,7 @@
+ //
+ // mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+ //
+-// Copyright (c) 2021 Pengutronix,
++// Copyright (c) 2021, 2023 Pengutronix,
+ // Marc Kleine-Budde
+ //
+
+@@ -11,20 +11,20 @@
+
+ #include "mcp251xfd.h"
+
+-static u64 mcp251xfd_timestamp_read(const struct cyclecounter *cc)
++static u64 mcp251xfd_timestamp_raw_read(const struct cyclecounter *cc)
+ {
+ const struct mcp251xfd_priv *priv;
+- u32 timestamp = 0;
++ u32 ts_raw = 0;
+ int err;
+
+ priv = container_of(cc, struct mcp251xfd_priv, cc);
+- err = mcp251xfd_get_timestamp(priv, &timestamp);
++ err = mcp251xfd_get_timestamp_raw(priv, &ts_raw);
+ if (err)
+ netdev_err(priv->ndev,
+ "Error %d while reading timestamp. 
HW timestamps may be inaccurate.", + err); + +- return timestamp; ++ return ts_raw; + } + + static void mcp251xfd_timestamp_work(struct work_struct *work) +@@ -39,21 +39,11 @@ static void mcp251xfd_timestamp_work(struct work_struct *work) + MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ); + } + +-void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv, +- struct sk_buff *skb, u32 timestamp) +-{ +- struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb); +- u64 ns; +- +- ns = timecounter_cyc2time(&priv->tc, timestamp); +- hwtstamps->hwtstamp = ns_to_ktime(ns); +-} +- + void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv) + { + struct cyclecounter *cc = &priv->cc; + +- cc->read = mcp251xfd_timestamp_read; ++ cc->read = mcp251xfd_timestamp_raw_read; + cc->mask = CYCLECOUNTER_MASK(32); + cc->shift = 1; + cc->mult = clocksource_hz2mult(priv->can.clock.freq, cc->shift); +diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h +index 4628bf847bc9b..991662fbba42e 100644 +--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h ++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h +@@ -2,7 +2,7 @@ + * + * mcp251xfd - Microchip MCP251xFD Family CAN controller driver + * +- * Copyright (c) 2019, 2020, 2021 Pengutronix, ++ * Copyright (c) 2019, 2020, 2021, 2023 Pengutronix, + * Marc Kleine-Budde + * Copyright (c) 2019 Martin Sperl + */ +@@ -554,10 +554,14 @@ struct mcp251xfd_rx_ring { + unsigned int head; + unsigned int tail; + ++ /* timestamp of the last valid received CAN frame */ ++ u64 last_valid; ++ + u16 base; + u8 nr; + u8 fifo_nr; + u8 obj_num; ++ u8 obj_num_shift_to_u8; + u8 obj_size; + + union mcp251xfd_write_reg_buf irq_enable_buf; +@@ -811,10 +815,27 @@ mcp251xfd_spi_cmd_write(const struct mcp251xfd_priv *priv, + return data; + } + +-static inline int mcp251xfd_get_timestamp(const struct mcp251xfd_priv *priv, +- u32 *timestamp) ++static inline int mcp251xfd_get_timestamp_raw(const struct mcp251xfd_priv *priv, ++ u32 *ts_raw) ++{ ++ return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, ts_raw); ++} ++ ++static inline void mcp251xfd_skb_set_timestamp(struct sk_buff *skb, u64 ns) ++{ ++ struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb); ++ ++ hwtstamps->hwtstamp = ns_to_ktime(ns); ++} ++ ++static inline ++void mcp251xfd_skb_set_timestamp_raw(const struct mcp251xfd_priv *priv, ++ struct sk_buff *skb, u32 ts_raw) + { +- return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, timestamp); ++ u64 ns; ++ ++ ns = timecounter_cyc2time(&priv->tc, ts_raw); ++ mcp251xfd_skb_set_timestamp(skb, ns); + } + + static inline u16 mcp251xfd_get_tef_obj_addr(u8 n) +@@ -907,18 +928,9 @@ static inline u8 mcp251xfd_get_rx_tail(const struct mcp251xfd_rx_ring *ring) + return ring->tail & (ring->obj_num - 1); + } + +-static inline u8 mcp251xfd_get_rx_len(const struct mcp251xfd_rx_ring *ring) +-{ +- return ring->head - ring->tail; +-} +- + static inline u8 +-mcp251xfd_get_rx_linear_len(const struct mcp251xfd_rx_ring *ring) ++mcp251xfd_get_rx_linear_len(const struct mcp251xfd_rx_ring *ring, u8 len) + { +- u8 len; +- +- len = mcp251xfd_get_rx_len(ring); +- + return min_t(u8, len, ring->obj_num - mcp251xfd_get_rx_tail(ring)); + } + +@@ -944,8 +956,6 @@ void mcp251xfd_ring_free(struct mcp251xfd_priv *priv); + int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv); + int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv); + int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv); +-void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv, +- struct sk_buff *skb, u32 
timestamp); + void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv); + void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv); + +diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c +index 23bd8b3f89931..a28bf5433ea72 100644 +--- a/drivers/net/dsa/vitesse-vsc73xx-core.c ++++ b/drivers/net/dsa/vitesse-vsc73xx-core.c +@@ -34,7 +34,7 @@ + #define VSC73XX_BLOCK_ANALYZER 0x2 /* Only subblock 0 */ + #define VSC73XX_BLOCK_MII 0x3 /* Subblocks 0 and 1 */ + #define VSC73XX_BLOCK_MEMINIT 0x3 /* Only subblock 2 */ +-#define VSC73XX_BLOCK_CAPTURE 0x4 /* Only subblock 2 */ ++#define VSC73XX_BLOCK_CAPTURE 0x4 /* Subblocks 0-4, 6, 7 */ + #define VSC73XX_BLOCK_ARBITER 0x5 /* Only subblock 0 */ + #define VSC73XX_BLOCK_SYSTEM 0x7 /* Only subblock 0 */ + +@@ -370,13 +370,19 @@ int vsc73xx_is_addr_valid(u8 block, u8 subblock) + break; + + case VSC73XX_BLOCK_MII: +- case VSC73XX_BLOCK_CAPTURE: + case VSC73XX_BLOCK_ARBITER: + switch (subblock) { + case 0 ... 1: + return 1; + } + break; ++ case VSC73XX_BLOCK_CAPTURE: ++ switch (subblock) { ++ case 0 ... 4: ++ case 6 ... 7: ++ return 1; ++ } ++ break; + } + + return 0; +diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +index dcbc598b11c6c..c6a3eefd83bff 100644 +--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c ++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +@@ -931,14 +931,18 @@ static inline void dpaa_setup_egress(const struct dpaa_priv *priv, + } + } + +-static void dpaa_fq_setup(struct dpaa_priv *priv, +- const struct dpaa_fq_cbs *fq_cbs, +- struct fman_port *tx_port) ++static int dpaa_fq_setup(struct dpaa_priv *priv, ++ const struct dpaa_fq_cbs *fq_cbs, ++ struct fman_port *tx_port) + { + int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu; + const cpumask_t *affine_cpus = qman_affine_cpus(); +- u16 channels[NR_CPUS]; + struct dpaa_fq *fq; ++ u16 *channels; ++ ++ channels = kcalloc(num_possible_cpus(), sizeof(u16), GFP_KERNEL); ++ if (!channels) ++ return -ENOMEM; + + for_each_cpu_and(cpu, affine_cpus, cpu_online_mask) + channels[num_portals++] = qman_affine_channel(cpu); +@@ -997,6 +1001,10 @@ static void dpaa_fq_setup(struct dpaa_priv *priv, + break; + } + } ++ ++ kfree(channels); ++ ++ return 0; + } + + static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv, +@@ -3416,7 +3424,9 @@ static int dpaa_eth_probe(struct platform_device *pdev) + */ + dpaa_eth_add_channel(priv->channel, &pdev->dev); + +- dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]); ++ err = dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]); ++ if (err) ++ goto free_dpaa_bps; + + /* Create a congestion group for this netdev, with + * dynamically-allocated CGR ID. 
+diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+index 5bd0b36d1feb5..3f8cd4a7d8457 100644
+--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+@@ -457,12 +457,16 @@ static int dpaa_set_coalesce(struct net_device *dev,
+ struct netlink_ext_ack *extack)
+ {
+ const cpumask_t *cpus = qman_affine_cpus();
+- bool needs_revert[NR_CPUS] = {false};
+ struct qman_portal *portal;
+ u32 period, prev_period;
+ u8 thresh, prev_thresh;
++ bool *needs_revert;
+ int cpu, res;
+
++ needs_revert = kcalloc(num_possible_cpus(), sizeof(bool), GFP_KERNEL);
++ if (!needs_revert)
++ return -ENOMEM;
++
+ period = c->rx_coalesce_usecs;
+ thresh = c->rx_max_coalesced_frames;
+
+@@ -485,6 +489,8 @@ static int dpaa_set_coalesce(struct net_device *dev,
+ needs_revert[cpu] = true;
+ }
+
++ kfree(needs_revert);
++
+ return 0;
+
+ revert_values:
+@@ -498,6 +504,8 @@ static int dpaa_set_coalesce(struct net_device *dev,
+ qman_dqrr_set_ithresh(portal, prev_thresh);
+ }
+
++ kfree(needs_revert);
++
+ return res;
+ }
+
+diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+index 4d83c9a0c023a..f9e94be36e97f 100644
+--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+@@ -2573,7 +2573,7 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
+ hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
+ (u16)(mac_reg & 0xFFFF));
+ hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
+- FIELD_GET(E1000_RAH_AV, mac_reg));
++ (u16)((mac_reg & E1000_RAH_AV) >> 16));
+ }
+
+ e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index c7962f322db2d..7b3ce30ba38fa 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -313,6 +313,7 @@ enum ice_vsi_state {
+ ICE_VSI_UMAC_FLTR_CHANGED,
+ ICE_VSI_MMAC_FLTR_CHANGED,
+ ICE_VSI_PROMISC_CHANGED,
++ ICE_VSI_REBUILD_PENDING,
+ ICE_VSI_STATE_NBITS /* must be last */
+ };
+
+@@ -409,6 +410,7 @@ struct ice_vsi {
+ struct ice_tx_ring **xdp_rings; /* XDP ring array */
+ u16 num_xdp_txq; /* Used XDP queues */
+ u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
++ struct mutex xdp_state_lock;
+
+ struct net_device **target_netdevs;
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index 13ca3342a0cea..b3010a53f1b45 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -459,6 +459,7 @@ static void ice_vsi_free(struct ice_vsi *vsi)
+
+ ice_vsi_free_stats(vsi);
+ ice_vsi_free_arrays(vsi);
++ mutex_destroy(&vsi->xdp_state_lock);
+ mutex_unlock(&pf->sw_mutex);
+ devm_kfree(dev, vsi);
+ }
+@@ -660,6 +661,8 @@ static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
+ pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
+ pf->next_vsi);
+
++ mutex_init(&vsi->xdp_state_lock);
++
+ unlock_pf:
+ mutex_unlock(&pf->sw_mutex);
+ return vsi;
+@@ -3164,19 +3167,23 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
+ if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
+ return -EINVAL;
+
++ mutex_lock(&vsi->xdp_state_lock);
++
+ ret = ice_vsi_realloc_stat_arrays(vsi);
+ if (ret)
+- goto err_vsi_cfg;
++ goto unlock;
+
+ ice_vsi_decfg(vsi);
+ ret = ice_vsi_cfg_def(vsi, &params);
+ if (ret)
+- goto err_vsi_cfg;
++ goto unlock;
+
+ coalesce = 
kcalloc(vsi->num_q_vectors, + sizeof(struct ice_coalesce_stored), GFP_KERNEL); +- if (!coalesce) +- return -ENOMEM; ++ if (!coalesce) { ++ ret = -ENOMEM; ++ goto decfg; ++ } + + prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce); + +@@ -3184,22 +3191,23 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) + if (ret) { + if (vsi_flags & ICE_VSI_FLAG_INIT) { + ret = -EIO; +- goto err_vsi_cfg_tc_lan; ++ goto free_coalesce; + } + +- kfree(coalesce); +- return ice_schedule_reset(pf, ICE_RESET_PFR); ++ ret = ice_schedule_reset(pf, ICE_RESET_PFR); ++ goto free_coalesce; + } + + ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors); +- kfree(coalesce); ++ clear_bit(ICE_VSI_REBUILD_PENDING, vsi->state); + +- return 0; +- +-err_vsi_cfg_tc_lan: +- ice_vsi_decfg(vsi); ++free_coalesce: + kfree(coalesce); +-err_vsi_cfg: ++decfg: ++ if (ret) ++ ice_vsi_decfg(vsi); ++unlock: ++ mutex_unlock(&vsi->xdp_state_lock); + return ret; + } + +diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c +index b168a37a5dfff..4d3a9fc79a6c1 100644 +--- a/drivers/net/ethernet/intel/ice/ice_main.c ++++ b/drivers/net/ethernet/intel/ice/ice_main.c +@@ -606,11 +606,15 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) + memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt)); + } + } ++ ++ if (vsi->netdev) ++ netif_device_detach(vsi->netdev); + skip: + + /* clear SW filtering DB */ + ice_clear_hw_tbls(hw); + /* disable the VSIs and their queues that are not already DOWN */ ++ set_bit(ICE_VSI_REBUILD_PENDING, ice_get_main_vsi(pf)->state); + ice_pf_dis_all_vsi(pf, false); + + if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) +@@ -2927,8 +2931,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, + struct netlink_ext_ack *extack) + { + unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD; +- bool if_running = netif_running(vsi->netdev); + int ret = 0, xdp_ring_err = 0; ++ bool if_running; + + if (prog && !prog->aux->xdp_has_frags) { + if (frame_size > ice_max_xdp_frame_size(vsi)) { +@@ -2939,13 +2943,17 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, + } + + /* hot swap progs and avoid toggling link */ +- if (ice_is_xdp_ena_vsi(vsi) == !!prog) { ++ if (ice_is_xdp_ena_vsi(vsi) == !!prog || ++ test_bit(ICE_VSI_REBUILD_PENDING, vsi->state)) { + ice_vsi_assign_bpf_prog(vsi, prog); + return 0; + } + ++ if_running = netif_running(vsi->netdev) && ++ !test_and_set_bit(ICE_VSI_DOWN, vsi->state); ++ + /* need to stop netdev while setting up the program for Rx rings */ +- if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { ++ if (if_running) { + ret = ice_down(vsi); + if (ret) { + NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed"); +@@ -3011,21 +3019,28 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp) + { + struct ice_netdev_priv *np = netdev_priv(dev); + struct ice_vsi *vsi = np->vsi; ++ int ret; + + if (vsi->type != ICE_VSI_PF) { + NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI"); + return -EINVAL; + } + ++ mutex_lock(&vsi->xdp_state_lock); ++ + switch (xdp->command) { + case XDP_SETUP_PROG: +- return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack); ++ ret = ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack); ++ break; + case XDP_SETUP_XSK_POOL: +- return ice_xsk_pool_setup(vsi, xdp->xsk.pool, +- xdp->xsk.queue_id); ++ ret = ice_xsk_pool_setup(vsi, xdp->xsk.pool, xdp->xsk.queue_id); ++ break; + default: +- return -EINVAL; ++ ret = 
-EINVAL; + } ++ ++ mutex_unlock(&vsi->xdp_state_lock); ++ return ret; + } + + /** +@@ -3979,13 +3994,17 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked) + + /* set for the next time the netdev is started */ + if (!netif_running(vsi->netdev)) { +- ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); ++ err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); ++ if (err) ++ goto rebuild_err; + dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n"); + goto done; + } + + ice_vsi_close(vsi); +- ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); ++ err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); ++ if (err) ++ goto rebuild_err; + + ice_for_each_traffic_class(i) { + if (vsi->tc_cfg.ena_tc & BIT(i)) +@@ -3996,6 +4015,11 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked) + } + ice_pf_dcb_recfg(pf, locked); + ice_vsi_open(vsi); ++ goto done; ++ ++rebuild_err: ++ dev_err(ice_pf_to_dev(pf), "Error during VSI rebuild: %d. Unload and reload the driver.\n", ++ err); + done: + clear_bit(ICE_CFG_BUSY, pf->state); + return err; +@@ -7286,6 +7310,7 @@ static void ice_update_pf_netdev_link(struct ice_pf *pf) + */ + static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) + { ++ struct ice_vsi *vsi = ice_get_main_vsi(pf); + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + bool dvm; +@@ -7438,6 +7463,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) + ice_rebuild_arfs(pf); + } + ++ if (vsi && vsi->netdev) ++ netif_device_attach(vsi->netdev); ++ + ice_update_pf_netdev_link(pf); + + /* tell the firmware we are up */ +diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c +index 67511153081ae..9a9b8698881b4 100644 +--- a/drivers/net/ethernet/intel/ice/ice_xsk.c ++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c +@@ -396,7 +396,8 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) + goto failure; + } + +- if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi); ++ if_running = !test_bit(ICE_VSI_DOWN, vsi->state) && ++ ice_is_xdp_ena_vsi(vsi); + + if (if_running) { + struct ice_rx_ring *rx_ring = vsi->rx_rings[qid]; +diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c +index 8c8894ef33886..fa268d7bd1bc3 100644 +--- a/drivers/net/ethernet/intel/igb/igb_main.c ++++ b/drivers/net/ethernet/intel/igb/igb_main.c +@@ -6985,10 +6985,20 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt) + + static void igb_tsync_interrupt(struct igb_adapter *adapter) + { ++ const u32 mask = (TSINTR_SYS_WRAP | E1000_TSICR_TXTS | ++ TSINTR_TT0 | TSINTR_TT1 | ++ TSINTR_AUTT0 | TSINTR_AUTT1); + struct e1000_hw *hw = &adapter->hw; + u32 tsicr = rd32(E1000_TSICR); + struct ptp_clock_event event; + ++ if (hw->mac.type == e1000_82580) { ++ /* 82580 has a hardware bug that requires an explicit ++ * write to clear the TimeSync interrupt cause. 
++ */ ++ wr32(E1000_TSICR, tsicr & mask); ++ } ++ + if (tsicr & TSINTR_SYS_WRAP) { + event.type = PTP_CLOCK_PPS; + if (adapter->ptp_caps.pps) +diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c +index 21fb1a98ebca6..da1018d832622 100644 +--- a/drivers/net/ethernet/intel/igc/igc_main.c ++++ b/drivers/net/ethernet/intel/igc/igc_main.c +@@ -7288,6 +7288,7 @@ static void igc_io_resume(struct pci_dev *pdev) + rtnl_lock(); + if (netif_running(netdev)) { + if (igc_open(netdev)) { ++ rtnl_unlock(); + netdev_err(netdev, "igc_open failed after reset\n"); + return; + } +diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c +index fe4e166de8a04..79276bc3d4951 100644 +--- a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c ++++ b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c +@@ -1442,18 +1442,8 @@ static void vcap_api_encode_rule_test(struct kunit *test) + vcap_enable_lookups(&test_vctrl, &test_netdev, 0, 0, + rule->cookie, false); + +- vcap_free_rule(rule); +- +- /* Check that the rule has been freed: tricky to access since this +- * memory should not be accessible anymore +- */ +- KUNIT_EXPECT_PTR_NE(test, NULL, rule); +- ret = list_empty(&rule->keyfields); +- KUNIT_EXPECT_EQ(test, true, ret); +- ret = list_empty(&rule->actionfields); +- KUNIT_EXPECT_EQ(test, true, ret); +- +- vcap_del_rule(&test_vctrl, &test_netdev, id); ++ ret = vcap_del_rule(&test_vctrl, &test_netdev, id); ++ KUNIT_EXPECT_EQ(test, 0, ret); + } + + static void vcap_api_set_rule_counter_test(struct kunit *test) +diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c +index 765c5fc158f8f..d8cce3771af21 100644 +--- a/drivers/net/ethernet/microsoft/mana/mana_en.c ++++ b/drivers/net/ethernet/microsoft/mana/mana_en.c +@@ -1858,10 +1858,12 @@ static void mana_destroy_txq(struct mana_port_context *apc) + + for (i = 0; i < apc->num_queues; i++) { + napi = &apc->tx_qp[i].tx_cq.napi; +- napi_synchronize(napi); +- napi_disable(napi); +- netif_napi_del(napi); +- ++ if (apc->tx_qp[i].txq.napi_initialized) { ++ napi_synchronize(napi); ++ napi_disable(napi); ++ netif_napi_del(napi); ++ apc->tx_qp[i].txq.napi_initialized = false; ++ } + mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object); + + mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq); +@@ -1917,6 +1919,7 @@ static int mana_create_txq(struct mana_port_context *apc, + txq->ndev = net; + txq->net_txq = netdev_get_tx_queue(net, i); + txq->vp_offset = apc->tx_vp_offset; ++ txq->napi_initialized = false; + skb_queue_head_init(&txq->pending_skbs); + + memset(&spec, 0, sizeof(spec)); +@@ -1983,6 +1986,7 @@ static int mana_create_txq(struct mana_port_context *apc, + + netif_napi_add_tx(net, &cq->napi, mana_poll); + napi_enable(&cq->napi); ++ txq->napi_initialized = true; + + mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT); + } +@@ -1994,7 +1998,7 @@ static int mana_create_txq(struct mana_port_context *apc, + } + + static void mana_destroy_rxq(struct mana_port_context *apc, +- struct mana_rxq *rxq, bool validate_state) ++ struct mana_rxq *rxq, bool napi_initialized) + + { + struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; +@@ -2009,15 +2013,15 @@ static void mana_destroy_rxq(struct mana_port_context *apc, + + napi = &rxq->rx_cq.napi; + +- if (validate_state) ++ if (napi_initialized) { + napi_synchronize(napi); + +- napi_disable(napi); ++ napi_disable(napi); + ++ netif_napi_del(napi); ++ } + 
xdp_rxq_info_unreg(&rxq->xdp_rxq); + +- netif_napi_del(napi); +- + mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj); + + mana_deinit_cq(apc, &rxq->rx_cq); +diff --git a/drivers/net/mctp/mctp-serial.c b/drivers/net/mctp/mctp-serial.c +index 5bf6fdff701cd..346e6ad36054e 100644 +--- a/drivers/net/mctp/mctp-serial.c ++++ b/drivers/net/mctp/mctp-serial.c +@@ -91,8 +91,8 @@ static int next_chunk_len(struct mctp_serial *dev) + * will be those non-escaped bytes, and does not include the escaped + * byte. + */ +- for (i = 1; i + dev->txpos + 1 < dev->txlen; i++) { +- if (needs_escape(dev->txbuf[dev->txpos + i + 1])) ++ for (i = 1; i + dev->txpos < dev->txlen; i++) { ++ if (needs_escape(dev->txbuf[dev->txpos + i])) + break; + } + +diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c +index c895cd178e6a1..2e4bff6055e22 100644 +--- a/drivers/net/phy/phy_device.c ++++ b/drivers/net/phy/phy_device.c +@@ -3164,11 +3164,13 @@ static int of_phy_leds(struct phy_device *phydev) + err = of_phy_led(phydev, led); + if (err) { + of_node_put(led); ++ of_node_put(leds); + phy_leds_unregister(phydev); + return err; + } + } + ++ of_node_put(leds); + return 0; + } + +diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c +index 687d70cfc5563..6eeef10edadad 100644 +--- a/drivers/net/usb/ipheth.c ++++ b/drivers/net/usb/ipheth.c +@@ -475,8 +475,8 @@ static int ipheth_close(struct net_device *net) + { + struct ipheth_device *dev = netdev_priv(net); + +- cancel_delayed_work_sync(&dev->carrier_work); + netif_stop_queue(net); ++ cancel_delayed_work_sync(&dev->carrier_work); + return 0; + } + +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c +index 127b34dcc5b37..ce19ebd180f12 100644 +--- a/drivers/net/usb/r8152.c ++++ b/drivers/net/usb/r8152.c +@@ -5143,14 +5143,23 @@ static void rtl8152_fw_mac_apply(struct r8152 *tp, struct fw_mac *mac) + data = (u8 *)mac; + data += __le16_to_cpu(mac->fw_offset); + +- generic_ocp_write(tp, __le16_to_cpu(mac->fw_reg), 0xff, length, data, +- type); ++ if (generic_ocp_write(tp, __le16_to_cpu(mac->fw_reg), 0xff, length, ++ data, type) < 0) { ++ dev_err(&tp->intf->dev, "Write %s fw fail\n", ++ type ? "PLA" : "USB"); ++ return; ++ } + + ocp_write_word(tp, type, __le16_to_cpu(mac->bp_ba_addr), + __le16_to_cpu(mac->bp_ba_value)); + +- generic_ocp_write(tp, __le16_to_cpu(mac->bp_start), BYTE_EN_DWORD, +- __le16_to_cpu(mac->bp_num) << 1, mac->bp, type); ++ if (generic_ocp_write(tp, __le16_to_cpu(mac->bp_start), BYTE_EN_DWORD, ++ ALIGN(__le16_to_cpu(mac->bp_num) << 1, 4), ++ mac->bp, type) < 0) { ++ dev_err(&tp->intf->dev, "Write %s bp fail\n", ++ type ? "PLA" : "USB"); ++ return; ++ } + + bp_en_addr = __le16_to_cpu(mac->bp_en_addr); + if (bp_en_addr) +diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c +index 2d14b0d78541a..6cc1b56ddde2f 100644 +--- a/drivers/net/usb/usbnet.c ++++ b/drivers/net/usb/usbnet.c +@@ -61,9 +61,6 @@ + + /*-------------------------------------------------------------------------*/ + +-// randomly generated ethernet address +-static u8 node_id [ETH_ALEN]; +- + /* use ethtool to change the level for any given device */ + static int msg_level = -1; + module_param (msg_level, int, 0); +@@ -1731,7 +1728,6 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) + + dev->net = net; + strscpy(net->name, "usb%d", sizeof(net->name)); +- eth_hw_addr_set(net, node_id); + + /* rx and tx sides can use different message sizes; + * bind() should set rx_urb_size in that case. 
+@@ -1805,9 +1801,9 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) + goto out4; + } + +- /* let userspace know we have a random address */ +- if (ether_addr_equal(net->dev_addr, node_id)) +- net->addr_assign_type = NET_ADDR_RANDOM; ++ /* this flags the device for user space */ ++ if (!is_valid_ether_addr(net->dev_addr)) ++ eth_hw_addr_random(net); + + if ((dev->driver_info->flags & FLAG_WLAN) != 0) + SET_NETDEV_DEVTYPE(net, &wlan_type); +@@ -2217,7 +2213,6 @@ static int __init usbnet_init(void) + BUILD_BUG_ON( + sizeof_field(struct sk_buff, cb) < sizeof(struct skb_data)); + +- eth_random_addr(node_id); + return 0; + } + module_init(usbnet_init); +diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c +index ba6fc27f4a1a1..dd2a7c95517be 100644 +--- a/drivers/net/wireless/ath/ath12k/mac.c ++++ b/drivers/net/wireless/ath/ath12k/mac.c +@@ -1614,7 +1614,9 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar, + { + const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; + int i; +- u8 ampdu_factor, rx_mcs_80, rx_mcs_160, max_nss; ++ u8 ampdu_factor, max_nss; ++ u8 rx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED; ++ u8 rx_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED; + u16 mcs_160_map, mcs_80_map; + bool support_160; + u16 v; +@@ -3355,6 +3357,11 @@ static int ath12k_station_assoc(struct ath12k *ar, + + ath12k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc); + ++ if (peer_arg.peer_nss < 1) { ++ ath12k_warn(ar->ab, ++ "invalid peer NSS %d\n", peer_arg.peer_nss); ++ return -EINVAL; ++ } + ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg); + if (ret) { + ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n", +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c +index 543e93ec49d22..9ab669487de4d 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c +@@ -1086,6 +1086,7 @@ static int ieee_hw_init(struct ieee80211_hw *hw) + ieee80211_hw_set(hw, AMPDU_AGGREGATION); + ieee80211_hw_set(hw, SIGNAL_DBM); + ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); ++ ieee80211_hw_set(hw, MFP_CAPABLE); + + hw->extra_tx_headroom = brcms_c_get_header_len(); + hw->queues = N_TX_QUEUES; +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +index c780e5ffcd596..bace9d01fd583 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +@@ -1318,7 +1318,8 @@ iwl_mvm_rcu_dereference_vif_id(struct iwl_mvm *mvm, u8 vif_id, bool rcu) + static inline struct ieee80211_bss_conf * + iwl_mvm_rcu_fw_link_id_to_link_conf(struct iwl_mvm *mvm, u8 link_id, bool rcu) + { +- if (WARN_ON(link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf))) ++ if (IWL_FW_CHECK(mvm, link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf), ++ "erroneous FW link ID: %d\n", link_id)) + return NULL; + + if (rcu) +diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h +index 7bdec6c622481..dc6b4cf616bea 100644 +--- a/drivers/net/wireless/marvell/mwifiex/main.h ++++ b/drivers/net/wireless/marvell/mwifiex/main.h +@@ -1290,6 +1290,9 @@ mwifiex_get_priv_by_id(struct mwifiex_adapter *adapter, + + for (i = 0; i < adapter->priv_num; i++) { + if (adapter->priv[i]) { ++ if (adapter->priv[i]->bss_mode == NL80211_IFTYPE_UNSPECIFIED) ++ continue; ++ + if 
((adapter->priv[i]->bss_num == bss_num) && + (adapter->priv[i]->bss_type == bss_type)) + break; +diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c +index efd0c2915a051..04a64afcbf8a2 100644 +--- a/drivers/net/wireless/realtek/rtw88/usb.c ++++ b/drivers/net/wireless/realtek/rtw88/usb.c +@@ -742,7 +742,6 @@ static struct rtw_hci_ops rtw_usb_ops = { + static int rtw_usb_init_rx(struct rtw_dev *rtwdev) + { + struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev); +- int i; + + rtwusb->rxwq = create_singlethread_workqueue("rtw88_usb: rx wq"); + if (!rtwusb->rxwq) { +@@ -754,13 +753,19 @@ static int rtw_usb_init_rx(struct rtw_dev *rtwdev) + + INIT_WORK(&rtwusb->rx_work, rtw_usb_rx_handler); + ++ return 0; ++} ++ ++static void rtw_usb_setup_rx(struct rtw_dev *rtwdev) ++{ ++ struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev); ++ int i; ++ + for (i = 0; i < RTW_USB_RXCB_NUM; i++) { + struct rx_usb_ctrl_block *rxcb = &rtwusb->rx_cb[i]; + + rtw_usb_rx_resubmit(rtwusb, rxcb); + } +- +- return 0; + } + + static void rtw_usb_deinit_rx(struct rtw_dev *rtwdev) +@@ -897,6 +902,8 @@ int rtw_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) + goto err_destroy_rxwq; + } + ++ rtw_usb_setup_rx(rtwdev); ++ + return 0; + + err_destroy_rxwq: +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c +index 2c3f55877a113..7fc1ab4d9e7d8 100644 +--- a/drivers/nvme/host/pci.c ++++ b/drivers/nvme/host/pci.c +@@ -2471,6 +2471,12 @@ static unsigned int nvme_pci_nr_maps(struct nvme_dev *dev) + + static void nvme_pci_update_nr_queues(struct nvme_dev *dev) + { ++ if (!dev->ctrl.tagset) { ++ nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops, ++ nvme_pci_nr_maps(dev), sizeof(struct nvme_iod)); ++ return; ++ } ++ + blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1); + /* free previously allocated queues that are no longer usable */ + nvme_free_queues(dev, dev->online_queues); +@@ -2929,6 +2935,17 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) + dmi_match(DMI_BOARD_NAME, "NS5x_7xPU") || + dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1")) + return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND; ++ } else if (pdev->vendor == 0x144d && pdev->device == 0xa80d) { ++ /* ++ * Exclude Samsung 990 Evo from NVME_QUIRK_SIMPLE_SUSPEND ++ * because of high power consumption (> 2 Watt) in s2idle ++ * sleep. Only some boards with Intel CPU are affected. 
++ */
++ if (dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
++ dmi_match(DMI_BOARD_NAME, "PH4PG31") ||
++ dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1") ||
++ dmi_match(DMI_BOARD_NAME, "PH6PG01_PH6PG71"))
++ return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND;
+ }
+
+ /*
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index c65a1f4421f60..bd142aed20f45 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -1859,8 +1859,10 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
+ }
+
+ queue->nr_cmds = sq->size * 2;
+- if (nvmet_tcp_alloc_cmds(queue))
++ if (nvmet_tcp_alloc_cmds(queue)) {
++ queue->nr_cmds = 0;
+ return NVME_SC_INTERNAL;
++ }
+ return 0;
+ }
+
+diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
+index 040dfa01fa12e..e7fd1315d7edc 100644
+--- a/drivers/nvmem/core.c
++++ b/drivers/nvmem/core.c
+@@ -1253,13 +1253,13 @@ void nvmem_device_put(struct nvmem_device *nvmem)
+ EXPORT_SYMBOL_GPL(nvmem_device_put);
+
+ /**
+- * devm_nvmem_device_get() - Get nvmem cell of device form a given id
++ * devm_nvmem_device_get() - Get nvmem device of device from a given id
+ *
+ * @dev: Device that requests the nvmem device.
+ * @id: name id for the requested nvmem device.
+ *
+- * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_cell
+- * on success. The nvmem_cell will be freed by the automatically once the
++ * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
++ * on success. The nvmem_device will be freed automatically once the
+ * device is freed.
+ */
+ struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
+diff --git a/drivers/of/irq.c b/drivers/of/irq.c
+index c94203ce65bb3..8fd63100ba8f0 100644
+--- a/drivers/of/irq.c
++++ b/drivers/of/irq.c
+@@ -344,7 +344,8 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
+ struct device_node *p;
+ const __be32 *addr;
+ u32 intsize;
+- int i, res;
++ int i, res, addr_len;
++ __be32 addr_buf[3] = { 0 };
+
+ pr_debug("of_irq_parse_one: dev=%pOF, index=%d\n", device, index);
+
+@@ -353,13 +354,19 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
+ return of_irq_parse_oldworld(device, index, out_irq);
+
+ /* Get the reg property (if any) */
+- addr = of_get_property(device, "reg", NULL);
++ addr = of_get_property(device, "reg", &addr_len);
++
++ /* Prevent out-of-bounds read in case of longer interrupt parent address size */
++ if (addr_len > (3 * sizeof(__be32)))
++ addr_len = 3 * sizeof(__be32);
++ if (addr)
++ memcpy(addr_buf, addr, addr_len);
+
+ /* Try the new-style interrupts-extended first */
+ res = of_parse_phandle_with_args(device, "interrupts-extended",
+ "#interrupt-cells", index, out_irq);
+ if (!res)
+- return of_irq_parse_raw(addr, out_irq);
++ return of_irq_parse_raw(addr_buf, out_irq);
+
+ /* Look for the interrupt parent. 
*/ + p = of_irq_find_parent(device); +@@ -389,7 +396,7 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar + + + /* Check if there are any interrupt-map translations to process */ +- res = of_irq_parse_raw(addr, out_irq); ++ res = of_irq_parse_raw(addr_buf, out_irq); + out: + of_node_put(p); + return res; +diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c +index 54a3c7f29f78a..c1dedc83759c6 100644 +--- a/drivers/pci/controller/dwc/pci-keystone.c ++++ b/drivers/pci/controller/dwc/pci-keystone.c +@@ -34,6 +34,11 @@ + #define PCIE_DEVICEID_SHIFT 16 + + /* Application registers */ ++#define PID 0x000 ++#define RTL GENMASK(15, 11) ++#define RTL_SHIFT 11 ++#define AM6_PCI_PG1_RTL_VER 0x15 ++ + #define CMD_STATUS 0x004 + #define LTSSM_EN_VAL BIT(0) + #define OB_XLAT_EN_VAL BIT(1) +@@ -104,6 +109,8 @@ + + #define to_keystone_pcie(x) dev_get_drvdata((x)->dev) + ++#define PCI_DEVICE_ID_TI_AM654X 0xb00c ++ + struct ks_pcie_of_data { + enum dw_pcie_device_mode mode; + const struct dw_pcie_host_ops *host_ops; +@@ -518,7 +525,11 @@ static int ks_pcie_start_link(struct dw_pcie *pci) + static void ks_pcie_quirk(struct pci_dev *dev) + { + struct pci_bus *bus = dev->bus; ++ struct keystone_pcie *ks_pcie; ++ struct device *bridge_dev; + struct pci_dev *bridge; ++ u32 val; ++ + static const struct pci_device_id rc_pci_devids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK), + .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, }, +@@ -530,6 +541,11 @@ static void ks_pcie_quirk(struct pci_dev *dev) + .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, }, + { 0, }, + }; ++ static const struct pci_device_id am6_pci_devids[] = { ++ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654X), ++ .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, ++ { 0, }, ++ }; + + if (pci_is_root_bus(bus)) + bridge = dev; +@@ -551,10 +567,36 @@ static void ks_pcie_quirk(struct pci_dev *dev) + */ + if (pci_match_id(rc_pci_devids, bridge)) { + if (pcie_get_readrq(dev) > 256) { +- dev_info(&dev->dev, "limiting MRRS to 256\n"); ++ dev_info(&dev->dev, "limiting MRRS to 256 bytes\n"); + pcie_set_readrq(dev, 256); + } + } ++ ++ /* ++ * Memory transactions fail with PCI controller in AM654 PG1.0 ++ * when MRRS is set to more than 128 bytes. Force the MRRS to ++ * 128 bytes in all downstream devices. 
++ */
++ if (pci_match_id(am6_pci_devids, bridge)) {
++ bridge_dev = pci_get_host_bridge_device(dev);
++ if (!bridge_dev || !bridge_dev->parent)
++ return;
++
++ ks_pcie = dev_get_drvdata(bridge_dev->parent);
++ if (!ks_pcie)
++ return;
++
++ val = ks_pcie_app_readl(ks_pcie, PID);
++ val &= RTL;
++ val >>= RTL_SHIFT;
++ if (val != AM6_PCI_PG1_RTL_VER)
++ return;
++
++ if (pcie_get_readrq(dev) > 128) {
++ dev_info(&dev->dev, "limiting MRRS to 128 bytes\n");
++ pcie_set_readrq(dev, 128);
++ }
++ }
+ }
+ DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
+
+diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
+index 881d420637bf1..092c9ac0d26d2 100644
+--- a/drivers/pci/hotplug/pnv_php.c
++++ b/drivers/pci/hotplug/pnv_php.c
+@@ -39,7 +39,6 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
+ bool disable_device)
+ {
+ struct pci_dev *pdev = php_slot->pdev;
+- int irq = php_slot->irq;
+ u16 ctrl;
+
+ if (php_slot->irq > 0) {
+@@ -58,7 +57,7 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
+ php_slot->wq = NULL;
+ }
+
+- if (disable_device || irq > 0) {
++ if (disable_device) {
+ if (pdev->msix_enabled)
+ pci_disable_msix(pdev);
+ else if (pdev->msi_enabled)
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index a0f961a380fa9..53e9e9788bd54 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -5718,10 +5718,12 @@ static void pci_bus_lock(struct pci_bus *bus)
+ {
+ struct pci_dev *dev;
+
++ pci_dev_lock(bus->self);
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+- pci_dev_lock(dev);
+ if (dev->subordinate)
+ pci_bus_lock(dev->subordinate);
++ else
++ pci_dev_lock(dev);
+ }
+ }
+
+@@ -5733,8 +5735,10 @@ static void pci_bus_unlock(struct pci_bus *bus)
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (dev->subordinate)
+ pci_bus_unlock(dev->subordinate);
+- pci_dev_unlock(dev);
++ else
++ pci_dev_unlock(dev);
+ }
++ pci_dev_unlock(bus->self);
+ }
+
+ /* Return 1 on successful lock, 0 on contention */
+@@ -5742,15 +5746,15 @@ static int pci_bus_trylock(struct pci_bus *bus)
+ {
+ struct pci_dev *dev;
+
++ if (!pci_dev_trylock(bus->self))
++ return 0;
++
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+- if (!pci_dev_trylock(dev))
+- goto unlock;
+ if (dev->subordinate) {
+- if (!pci_bus_trylock(dev->subordinate)) {
+- pci_dev_unlock(dev);
++ if (!pci_bus_trylock(dev->subordinate))
+ goto unlock;
+- }
+- }
++ } else if (!pci_dev_trylock(dev))
++ goto unlock;
+ }
+ return 1;
+
+@@ -5758,8 +5762,10 @@ static int pci_bus_trylock(struct pci_bus *bus)
+ list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
+ if (dev->subordinate)
+ pci_bus_unlock(dev->subordinate);
+- pci_dev_unlock(dev);
++ else
++ pci_dev_unlock(dev);
+ }
++ pci_dev_unlock(bus->self);
+ return 0;
+ }
+
+@@ -5791,9 +5797,10 @@ static void pci_slot_lock(struct pci_slot *slot)
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+- pci_dev_lock(dev);
+ if (dev->subordinate)
+ pci_bus_lock(dev->subordinate);
++ else
++ pci_dev_lock(dev);
+ }
+ }
+
+@@ -5819,14 +5826,13 @@ static int pci_slot_trylock(struct pci_slot *slot)
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+- if (!pci_dev_trylock(dev))
+- goto unlock;
+ if (dev->subordinate) {
+ if (!pci_bus_trylock(dev->subordinate)) {
+ pci_dev_unlock(dev);
+ goto unlock;
+ }
+- }
++ } else if (!pci_dev_trylock(dev))
++ goto unlock;
+ }
+ return 1;
+
+@@ 
-5837,7 +5843,8 @@ static int pci_slot_trylock(struct pci_slot *slot)
+ continue;
+ if (dev->subordinate)
+ pci_bus_unlock(dev->subordinate);
+- pci_dev_unlock(dev);
++ else
++ pci_dev_unlock(dev);
+ }
+ return 0;
+ }
+diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
+index 1365eaa20ff49..ff169124929cc 100644
+--- a/drivers/pcmcia/yenta_socket.c
++++ b/drivers/pcmcia/yenta_socket.c
+@@ -638,11 +638,11 @@ static int yenta_search_one_res(struct resource *root, struct resource *res,
+ start = PCIBIOS_MIN_CARDBUS_IO;
+ end = ~0U;
+ } else {
+- unsigned long avail = root->end - root->start;
++ unsigned long avail = resource_size(root);
+ int i;
+ size = BRIDGE_MEM_MAX;
+- if (size > avail/8) {
+- size = (avail+1)/8;
++ if (size > (avail - 1) / 8) {
++ size = avail / 8;
+ /* round size down to next power of 2 */
+ i = 0;
+ while ((size /= 2) != 0)
+diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c
+index 8c8b1ca31e4c4..c72b52955a867 100644
+--- a/drivers/phy/xilinx/phy-zynqmp.c
++++ b/drivers/phy/xilinx/phy-zynqmp.c
+@@ -846,6 +846,7 @@ static struct phy *xpsgtr_xlate(struct device *dev,
+ phy_type = args->args[1];
+ phy_instance = args->args[2];
+
++ guard(mutex)(&gtr_phy->phy->mutex);
+ ret = xpsgtr_set_lane_type(gtr_phy, phy_type, phy_instance);
+ if (ret < 0) {
+ dev_err(gtr_dev->dev, "Invalid PHY type and/or instance\n");
+diff --git a/drivers/platform/x86/dell/dell-smbios-base.c b/drivers/platform/x86/dell/dell-smbios-base.c
+index 86b95206cb1bd..6fb538a138689 100644
+--- a/drivers/platform/x86/dell/dell-smbios-base.c
++++ b/drivers/platform/x86/dell/dell-smbios-base.c
+@@ -590,7 +590,10 @@ static int __init dell_smbios_init(void)
+ return 0;
+
+ fail_sysfs:
+- free_group(platform_device);
++ if (!wmi)
++ exit_dell_smbios_wmi();
++ if (!smm)
++ exit_dell_smbios_smm();
+
+ fail_create_group:
+ platform_device_del(platform_device);
+diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
+index a5a31dfa45122..ee2da8e49d4cf 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.c
++++ b/drivers/scsi/pm8001/pm8001_sas.c
+@@ -166,7 +166,6 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+ unsigned long flags;
+ pm8001_ha = sas_phy->ha->lldd_ha;
+ phy = &pm8001_ha->phy[phy_id];
+- pm8001_ha->phy[phy_id].enable_completion = &completion;
+
+ if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
+ /*
+@@ -190,6 +189,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+ rates->maximum_linkrate;
+ }
+ if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
++ pm8001_ha->phy[phy_id].enable_completion = &completion;
+ PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
+ wait_for_completion(&completion);
+ }
+@@ -198,6 +198,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+ break;
+ case PHY_FUNC_HARD_RESET:
+ if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
++ pm8001_ha->phy[phy_id].enable_completion = &completion;
+ PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
+ wait_for_completion(&completion);
+ }
+@@ -206,6 +207,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+ break;
+ case PHY_FUNC_LINK_RESET:
+ if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
++ pm8001_ha->phy[phy_id].enable_completion = &completion;
+ PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
+ wait_for_completion(&completion);
+ }
+diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
+index 
3c0f7dc9614d1..f7e268cf90850 100644 +--- a/drivers/spi/spi-fsl-lpspi.c ++++ b/drivers/spi/spi-fsl-lpspi.c +@@ -82,6 +82,10 @@ + #define TCR_RXMSK BIT(19) + #define TCR_TXMSK BIT(18) + ++struct fsl_lpspi_devtype_data { ++ u8 prescale_max; ++}; ++ + struct lpspi_config { + u8 bpw; + u8 chip_select; +@@ -119,10 +123,25 @@ struct fsl_lpspi_data { + bool usedma; + struct completion dma_rx_completion; + struct completion dma_tx_completion; ++ ++ const struct fsl_lpspi_devtype_data *devtype_data; ++}; ++ ++/* ++ * ERR051608 fixed or not: ++ * https://www.nxp.com/docs/en/errata/i.MX93_1P87f.pdf ++ */ ++static struct fsl_lpspi_devtype_data imx93_lpspi_devtype_data = { ++ .prescale_max = 1, ++}; ++ ++static struct fsl_lpspi_devtype_data imx7ulp_lpspi_devtype_data = { ++ .prescale_max = 7, + }; + + static const struct of_device_id fsl_lpspi_dt_ids[] = { +- { .compatible = "fsl,imx7ulp-spi", }, ++ { .compatible = "fsl,imx7ulp-spi", .data = &imx7ulp_lpspi_devtype_data,}, ++ { .compatible = "fsl,imx93-spi", .data = &imx93_lpspi_devtype_data,}, + { /* sentinel */ } + }; + MODULE_DEVICE_TABLE(of, fsl_lpspi_dt_ids); +@@ -297,9 +316,11 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi) + { + struct lpspi_config config = fsl_lpspi->config; + unsigned int perclk_rate, scldiv, div; ++ u8 prescale_max; + u8 prescale; + + perclk_rate = clk_get_rate(fsl_lpspi->clk_per); ++ prescale_max = fsl_lpspi->devtype_data->prescale_max; + + if (!config.speed_hz) { + dev_err(fsl_lpspi->dev, +@@ -315,7 +336,7 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi) + + div = DIV_ROUND_UP(perclk_rate, config.speed_hz); + +- for (prescale = 0; prescale < 8; prescale++) { ++ for (prescale = 0; prescale <= prescale_max; prescale++) { + scldiv = div / (1 << prescale) - 2; + if (scldiv < 256) { + fsl_lpspi->config.prescale = prescale; +@@ -822,6 +843,7 @@ static int fsl_lpspi_init_rpm(struct fsl_lpspi_data *fsl_lpspi) + + static int fsl_lpspi_probe(struct platform_device *pdev) + { ++ const struct fsl_lpspi_devtype_data *devtype_data; + struct fsl_lpspi_data *fsl_lpspi; + struct spi_controller *controller; + struct resource *res; +@@ -830,6 +852,10 @@ static int fsl_lpspi_probe(struct platform_device *pdev) + u32 temp; + bool is_target; + ++ devtype_data = of_device_get_match_data(&pdev->dev); ++ if (!devtype_data) ++ return -ENODEV; ++ + is_target = of_property_read_bool((&pdev->dev)->of_node, "spi-slave"); + if (is_target) + controller = devm_spi_alloc_target(&pdev->dev, +@@ -848,6 +874,7 @@ static int fsl_lpspi_probe(struct platform_device *pdev) + fsl_lpspi->is_target = is_target; + fsl_lpspi->is_only_cs1 = of_property_read_bool((&pdev->dev)->of_node, + "fsl,spi-only-use-cs1-sel"); ++ fsl_lpspi->devtype_data = devtype_data; + + init_completion(&fsl_lpspi->xfer_done); + +diff --git a/drivers/spi/spi-hisi-kunpeng.c b/drivers/spi/spi-hisi-kunpeng.c +index 6910b4d4c427b..16054695bdb04 100644 +--- a/drivers/spi/spi-hisi-kunpeng.c ++++ b/drivers/spi/spi-hisi-kunpeng.c +@@ -481,6 +481,9 @@ static int hisi_spi_probe(struct platform_device *pdev) + return -EINVAL; + } + ++ if (host->max_speed_hz == 0) ++ return dev_err_probe(dev, -EINVAL, "spi-max-frequency can't be 0\n"); ++ + ret = device_property_read_u16(dev, "num-cs", + &host->num_chipselect); + if (ret) +diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c +index 5b010094dace5..1f374cf4d6f65 100644 +--- a/drivers/spi/spi-rockchip.c ++++ b/drivers/spi/spi-rockchip.c +@@ -974,14 +974,16 @@ static int rockchip_spi_suspend(struct device 
*dev) + { + int ret; + struct spi_controller *ctlr = dev_get_drvdata(dev); +- struct rockchip_spi *rs = spi_controller_get_devdata(ctlr); + + ret = spi_controller_suspend(ctlr); + if (ret < 0) + return ret; + +- clk_disable_unprepare(rs->spiclk); +- clk_disable_unprepare(rs->apb_pclk); ++ ret = pm_runtime_force_suspend(dev); ++ if (ret < 0) { ++ spi_controller_resume(ctlr); ++ return ret; ++ } + + pinctrl_pm_select_sleep_state(dev); + +@@ -992,25 +994,14 @@ static int rockchip_spi_resume(struct device *dev) + { + int ret; + struct spi_controller *ctlr = dev_get_drvdata(dev); +- struct rockchip_spi *rs = spi_controller_get_devdata(ctlr); + + pinctrl_pm_select_default_state(dev); + +- ret = clk_prepare_enable(rs->apb_pclk); ++ ret = pm_runtime_force_resume(dev); + if (ret < 0) + return ret; + +- ret = clk_prepare_enable(rs->spiclk); +- if (ret < 0) +- clk_disable_unprepare(rs->apb_pclk); +- +- ret = spi_controller_resume(ctlr); +- if (ret < 0) { +- clk_disable_unprepare(rs->spiclk); +- clk_disable_unprepare(rs->apb_pclk); +- } +- +- return 0; ++ return spi_controller_resume(ctlr); + } + #endif /* CONFIG_PM_SLEEP */ + +diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c +index 285df0e489a62..cfdfe66d74f17 100644 +--- a/drivers/staging/iio/frequency/ad9834.c ++++ b/drivers/staging/iio/frequency/ad9834.c +@@ -114,7 +114,7 @@ static int ad9834_write_frequency(struct ad9834_state *st, + + clk_freq = clk_get_rate(st->mclk); + +- if (fout > (clk_freq / 2)) ++ if (!clk_freq || fout > (clk_freq / 2)) + return -EINVAL; + + regval = ad9834_calc_freqreg(clk_freq, fout); +diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c +index ad0ef5b6b8cf9..ed59d2367a4e7 100644 +--- a/drivers/ufs/core/ufshcd.c ++++ b/drivers/ufs/core/ufshcd.c +@@ -10130,7 +10130,8 @@ void ufshcd_remove(struct ufs_hba *hba) + blk_mq_destroy_queue(hba->tmf_queue); + blk_put_queue(hba->tmf_queue); + blk_mq_free_tag_set(&hba->tmf_tag_set); +- scsi_remove_host(hba->host); ++ if (hba->scsi_host_added) ++ scsi_remove_host(hba->host); + /* disable interrupts */ + ufshcd_disable_intr(hba, hba->intr_mask); + ufshcd_hba_stop(hba); +@@ -10408,6 +10409,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) + dev_err(hba->dev, "scsi_add_host failed\n"); + goto out_disable; + } ++ hba->scsi_host_added = true; + } + + hba->tmf_tag_set = (struct blk_mq_tag_set) { +@@ -10489,7 +10491,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) + free_tmf_tag_set: + blk_mq_free_tag_set(&hba->tmf_tag_set); + out_remove_scsi_host: +- scsi_remove_host(hba->host); ++ if (hba->scsi_host_added) ++ scsi_remove_host(hba->host); + out_disable: + hba->is_irq_enabled = false; + ufshcd_hba_exit(hba); +diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c +index 6be3462b109ff..a2c7abf8c289e 100644 +--- a/drivers/uio/uio_hv_generic.c ++++ b/drivers/uio/uio_hv_generic.c +@@ -104,10 +104,11 @@ static void hv_uio_channel_cb(void *context) + + /* + * Callback from vmbus_event when channel is rescinded. ++ * It is meant for rescind of primary channels only. 
+ */ + static void hv_uio_rescind(struct vmbus_channel *channel) + { +- struct hv_device *hv_dev = channel->primary_channel->device_obj; ++ struct hv_device *hv_dev = channel->device_obj; + struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev); + + /* +@@ -118,6 +119,14 @@ static void hv_uio_rescind(struct vmbus_channel *channel) + + /* Wake up reader */ + uio_event_notify(&pdata->info); ++ ++ /* ++ * With rescind callback registered, rescind path will not unregister the device ++ * from vmbus when the primary channel is rescinded. ++ * Without it, rescind handling is incomplete and next onoffer msg does not come. ++ * Unregister the device from vmbus here. ++ */ ++ vmbus_device_unregister(channel->device_obj); + } + + /* Sysfs API to allow mmap of the ring buffers +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c +index 21ff6cbe5e49f..9955091c53360 100644 +--- a/drivers/usb/dwc3/core.c ++++ b/drivers/usb/dwc3/core.c +@@ -1288,6 +1288,21 @@ static int dwc3_core_init(struct dwc3 *dwc) + dwc3_writel(dwc->regs, DWC3_GUCTL2, reg); + } + ++ /* ++ * STAR 9001285599: This issue affects DWC_usb3 version 3.20a ++ * only. If the PM TIMER ECM is enabled through GUCTL2[19], the ++ * link compliance test (TD7.21) may fail. If the ECN is not ++ * enabled (GUCTL2[19] = 0), the controller will use the old timer ++ * value (5us), which is still acceptable for the link compliance ++ * test. Therefore, do not enable PM TIMER ECM in 3.20a by ++ * setting GUCTL2[19] by default; instead, use GUCTL2[19] = 0. ++ */ ++ if (DWC3_VER_IS(DWC3, 320A)) { ++ reg = dwc3_readl(dwc->regs, DWC3_GUCTL2); ++ reg &= ~DWC3_GUCTL2_LC_TIMER; ++ dwc3_writel(dwc->regs, DWC3_GUCTL2, reg); ++ } ++ + /* + * When configured in HOST mode, after issuing U3/L2 exit controller + * fails to send proper CRC checksum in CRC5 feild. Because of this +diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h +index 07b062c2f6479..db1a793a9b13f 100644 +--- a/drivers/usb/dwc3/core.h ++++ b/drivers/usb/dwc3/core.h +@@ -408,6 +408,7 @@ + + /* Global User Control Register 2 */ + #define DWC3_GUCTL2_RST_ACTBITLATER BIT(14) ++#define DWC3_GUCTL2_LC_TIMER BIT(19) + + /* Global User Control Register 3 */ + #define DWC3_GUCTL3_SPLITDISABLE BIT(14) +@@ -1238,6 +1239,7 @@ struct dwc3 { + #define DWC3_REVISION_290A 0x5533290a + #define DWC3_REVISION_300A 0x5533300a + #define DWC3_REVISION_310A 0x5533310a ++#define DWC3_REVISION_320A 0x5533320a + #define DWC3_REVISION_330A 0x5533330a + + #define DWC31_REVISION_ANY 0x0 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c +index 579d90efc281a..1a1da4a057019 100644 +--- a/drivers/usb/dwc3/gadget.c ++++ b/drivers/usb/dwc3/gadget.c +@@ -287,6 +287,23 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc, bool async); + * + * Caller should handle locking. This function will issue @cmd with given + * @params to @dep and wait for its completion. ++ * ++ * According to the programming guide, if the link state is in L1/L2/U3, ++ * then sending the Start Transfer command may not complete. The ++ * programming guide suggested to bring the link state back to ON/U0 by ++ * performing remote wakeup prior to sending the command. However, don't ++ * initiate remote wakeup when the user/function does not send wakeup ++ * request via wakeup ops. Send the command when it's allowed. ++ * ++ * Notes: ++ * For L1 link state, issuing a command requires the clearing of ++ * GUSB2PHYCFG.SUSPENDUSB2, which turns on the signal required to complete ++ * the given command (usually within 50us). 
This should happen within the ++ * command timeout set by driver. No additional step is needed. ++ * ++ * For L2 or U3 link state, the gadget is in USB suspend. Care should be ++ * taken when sending Start Transfer command to ensure that it's done after ++ * USB resume. + */ + int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd, + struct dwc3_gadget_ep_cmd_params *params) +@@ -327,30 +344,6 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd, + dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); + } + +- if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) { +- int link_state; +- +- /* +- * Initiate remote wakeup if the link state is in U3 when +- * operating in SS/SSP or L1/L2 when operating in HS/FS. If the +- * link state is in U1/U2, no remote wakeup is needed. The Start +- * Transfer command will initiate the link recovery. +- */ +- link_state = dwc3_gadget_get_link_state(dwc); +- switch (link_state) { +- case DWC3_LINK_STATE_U2: +- if (dwc->gadget->speed >= USB_SPEED_SUPER) +- break; +- +- fallthrough; +- case DWC3_LINK_STATE_U3: +- ret = __dwc3_gadget_wakeup(dwc, false); +- dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n", +- ret); +- break; +- } +- } +- + /* + * For some commands such as Update Transfer command, DEPCMDPARn + * registers are reserved. Since the driver often sends Update Transfer +diff --git a/drivers/usb/gadget/udc/aspeed_udc.c b/drivers/usb/gadget/udc/aspeed_udc.c +index fc2ead0fe6217..4868286574a1c 100644 +--- a/drivers/usb/gadget/udc/aspeed_udc.c ++++ b/drivers/usb/gadget/udc/aspeed_udc.c +@@ -1009,6 +1009,8 @@ static void ast_udc_getstatus(struct ast_udc_dev *udc) + break; + case USB_RECIP_ENDPOINT: + epnum = crq.wIndex & USB_ENDPOINT_NUMBER_MASK; ++ if (epnum >= AST_UDC_NUM_ENDPOINTS) ++ goto stall; + status = udc->ep[epnum].stopped; + break; + default: +diff --git a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c +index 0eed0e03842cf..d394affb70723 100644 +--- a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c ++++ b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c +@@ -2251,7 +2251,6 @@ static int cdns2_gadget_start(struct cdns2_device *pdev) + { + u32 max_speed; + void *buf; +- int val; + int ret; + + pdev->usb_regs = pdev->regs; +@@ -2261,14 +2260,9 @@ static int cdns2_gadget_start(struct cdns2_device *pdev) + pdev->adma_regs = pdev->regs + CDNS2_ADMA_REGS_OFFSET; + + /* Reset controller. */ +- set_reg_bit_8(&pdev->usb_regs->cpuctrl, CPUCTRL_SW_RST); +- +- ret = readl_poll_timeout_atomic(&pdev->usb_regs->cpuctrl, val, +- !(val & CPUCTRL_SW_RST), 1, 10000); +- if (ret) { +- dev_err(pdev->dev, "Error: reset controller timeout\n"); +- return -EINVAL; +- } ++ writeb(CPUCTRL_SW_RST | CPUCTRL_UPCLK | CPUCTRL_WUEN, ++ &pdev->usb_regs->cpuctrl); ++ usleep_range(5, 10); + + usb_initialize_gadget(pdev->dev, &pdev->gadget, NULL); + +diff --git a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.h b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.h +index 71e2f62d653a5..b5d5ec12e986e 100644 +--- a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.h ++++ b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.h +@@ -292,8 +292,17 @@ struct cdns2_usb_regs { + #define SPEEDCTRL_HSDISABLE BIT(7) + + /* CPUCTRL- bitmasks. */ ++/* UP clock enable */ ++#define CPUCTRL_UPCLK BIT(0) + /* Controller reset bit. */ + #define CPUCTRL_SW_RST BIT(1) ++/** ++ * If the wuen bit is ‘1’, the upclken is automatically set to ‘1’ after ++ * detecting rising edge of wuintereq interrupt. If the wuen bit is ‘0’, ++ * the wuintereq interrupt is ignored. 
++ */ ++#define CPUCTRL_WUEN BIT(7) ++ + + /** + * struct cdns2_adma_regs - ADMA controller registers. +diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c +index 451d9569163a7..f794cb39cc313 100644 +--- a/drivers/usb/storage/uas.c ++++ b/drivers/usb/storage/uas.c +@@ -422,6 +422,7 @@ static void uas_data_cmplt(struct urb *urb) + uas_log_cmd_state(cmnd, "data cmplt err", status); + /* error: no data transfered */ + scsi_set_resid(cmnd, sdb->length); ++ set_host_byte(cmnd, DID_ERROR); + } else { + scsi_set_resid(cmnd, sdb->length - urb->actual_length); + } +diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c +index a94ec6225d31a..5f9e7e4770783 100644 +--- a/drivers/vfio/vfio_iommu_spapr_tce.c ++++ b/drivers/vfio/vfio_iommu_spapr_tce.c +@@ -364,7 +364,6 @@ static void tce_iommu_release(void *iommu_data) + if (!tbl) + continue; + +- tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); + tce_iommu_free_table(container, tbl); + } + +@@ -720,6 +719,8 @@ static long tce_iommu_remove_window(struct tce_container *container, + + BUG_ON(!tbl->it_size); + ++ tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); ++ + /* Detach groups from IOMMUs */ + list_for_each_entry(tcegrp, &container->group_list, next) { + table_group = iommu_group_get_iommudata(tcegrp->grp); +@@ -738,7 +739,6 @@ static long tce_iommu_remove_window(struct tce_container *container, + } + + /* Free table */ +- tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); + tce_iommu_free_table(container, tbl); + container->tables[num] = NULL; + +@@ -1197,9 +1197,14 @@ static void tce_iommu_release_ownership(struct tce_container *container, + return; + } + +- for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) +- if (container->tables[i]) ++ for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { ++ if (container->tables[i]) { ++ tce_iommu_clear(container, container->tables[i], ++ container->tables[i]->it_offset, ++ container->tables[i]->it_size); + table_group->ops->unset_window(table_group, i); ++ } ++ } + } + + static long tce_iommu_take_ownership(struct tce_container *container, +diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c +index 6f7e5010a6735..80669e05bf0ee 100644 +--- a/drivers/virtio/virtio_ring.c ++++ b/drivers/virtio/virtio_ring.c +@@ -3126,8 +3126,10 @@ dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr, + { + struct vring_virtqueue *vq = to_vvq(_vq); + +- if (!vq->use_dma_api) ++ if (!vq->use_dma_api) { ++ kmsan_handle_dma(virt_to_page(ptr), offset_in_page(ptr), size, dir); + return (dma_addr_t)virt_to_phys(ptr); ++ } + + return dma_map_single_attrs(vring_dma_dev(vq), ptr, size, dir, attrs); + } +diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c +index 923f064c7e3e9..61aaded483e1d 100644 +--- a/drivers/xen/privcmd.c ++++ b/drivers/xen/privcmd.c +@@ -17,6 +17,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -842,6 +843,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file, + /* Irqfd support */ + static struct workqueue_struct *irqfd_cleanup_wq; + static DEFINE_SPINLOCK(irqfds_lock); ++DEFINE_STATIC_SRCU(irqfds_srcu); + static LIST_HEAD(irqfds_list); + + struct privcmd_kernel_irqfd { +@@ -869,6 +871,9 @@ static void irqfd_shutdown(struct work_struct *work) + container_of(work, struct privcmd_kernel_irqfd, shutdown); + u64 cnt; + ++ /* Make sure irqfd has been initialized in assign path */ ++ synchronize_srcu(&irqfds_srcu); ++ + 
eventfd_ctx_remove_wait_queue(kirqfd->eventfd, &kirqfd->wait, &cnt); + eventfd_ctx_put(kirqfd->eventfd); + kfree(kirqfd); +@@ -931,7 +936,7 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd) + __poll_t events; + struct fd f; + void *dm_op; +- int ret; ++ int ret, idx; + + kirqfd = kzalloc(sizeof(*kirqfd) + irqfd->size, GFP_KERNEL); + if (!kirqfd) +@@ -977,6 +982,7 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd) + } + } + ++ idx = srcu_read_lock(&irqfds_srcu); + list_add_tail(&kirqfd->list, &irqfds_list); + spin_unlock_irqrestore(&irqfds_lock, flags); + +@@ -988,6 +994,8 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd) + if (events & EPOLLIN) + irqfd_inject(kirqfd); + ++ srcu_read_unlock(&irqfds_srcu, idx); ++ + /* + * Do not drop the file until the kirqfd is fully initialized, otherwise + * we might race against the EPOLLHUP. +diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c +index 7b3d2d4914073..fb2c8d14327ae 100644 +--- a/fs/binfmt_elf.c ++++ b/fs/binfmt_elf.c +@@ -1008,7 +1008,8 @@ static int load_elf_binary(struct linux_binprm *bprm) + if (elf_read_implies_exec(*elf_ex, executable_stack)) + current->personality |= READ_IMPLIES_EXEC; + +- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) ++ const int snapshot_randomize_va_space = READ_ONCE(randomize_va_space); ++ if (!(current->personality & ADDR_NO_RANDOMIZE) && snapshot_randomize_va_space) + current->flags |= PF_RANDOMIZE; + + setup_new_exec(bprm); +@@ -1300,7 +1301,7 @@ static int load_elf_binary(struct linux_binprm *bprm) + mm->end_data = end_data; + mm->start_stack = bprm->p; + +- if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) { ++ if ((current->flags & PF_RANDOMIZE) && (snapshot_randomize_va_space > 1)) { + /* + * For architectures with ELF randomization, when executing + * a loader directly (i.e. no interpreter listed in ELF +diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c +index 118ad4d2cbbe2..2eb4e03080ac9 100644 +--- a/fs/btrfs/ctree.c ++++ b/fs/btrfs/ctree.c +@@ -451,8 +451,16 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans, + } + + owner = btrfs_header_owner(buf); +- BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID && +- !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)); ++ if (unlikely(owner == BTRFS_TREE_RELOC_OBJECTID && ++ !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))) { ++ btrfs_crit(fs_info, ++"found tree block at bytenr %llu level %d root %llu refs %llu flags %llx without full backref flag set", ++ buf->start, btrfs_header_level(buf), ++ btrfs_root_id(root), refs, flags); ++ ret = -EUCLEAN; ++ btrfs_abort_transaction(trans, ret); ++ return ret; ++ } + + if (refs > 1) { + if ((owner == root->root_key.objectid || +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h +index 86c7f8ce1715e..06333a74d6c4c 100644 +--- a/fs/btrfs/ctree.h ++++ b/fs/btrfs/ctree.h +@@ -445,7 +445,6 @@ struct btrfs_file_private { + void *filldir_buf; + u64 last_index; + struct extent_state *llseek_cached_state; +- bool fsync_skip_inode_lock; + }; + + static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info) +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c +index c6ecfd05e1db9..72851adc1feeb 100644 +--- a/fs/btrfs/extent-tree.c ++++ b/fs/btrfs/extent-tree.c +@@ -5085,7 +5085,15 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans, + /* We don't care about errors in readahead. 
*/ + if (ret < 0) + continue; +- BUG_ON(refs == 0); ++ ++ /* ++ * This could be racey, it's conceivable that we raced and end ++ * up with a bogus refs count, if that's the case just skip, if ++ * we are actually corrupt we will notice when we look up ++ * everything again with our locks. ++ */ ++ if (refs == 0) ++ continue; + + if (wc->stage == DROP_REFERENCE) { + if (refs == 1) +@@ -5144,7 +5152,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans, + if (lookup_info && + ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) || + (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) { +- BUG_ON(!path->locks[level]); ++ ASSERT(path->locks[level]); + ret = btrfs_lookup_extent_info(trans, fs_info, + eb->start, level, 1, + &wc->refs[level], +@@ -5152,7 +5160,11 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans, + BUG_ON(ret == -ENOMEM); + if (ret) + return ret; +- BUG_ON(wc->refs[level] == 0); ++ if (unlikely(wc->refs[level] == 0)) { ++ btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0", ++ eb->start); ++ return -EUCLEAN; ++ } + } + + if (wc->stage == DROP_REFERENCE) { +@@ -5168,7 +5180,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans, + + /* wc->stage == UPDATE_BACKREF */ + if (!(wc->flags[level] & flag)) { +- BUG_ON(!path->locks[level]); ++ ASSERT(path->locks[level]); + ret = btrfs_inc_ref(trans, root, eb, 1); + BUG_ON(ret); /* -ENOMEM */ + ret = btrfs_dec_ref(trans, root, eb, 0); +@@ -5286,8 +5298,9 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans, + goto out_unlock; + + if (unlikely(wc->refs[level - 1] == 0)) { +- btrfs_err(fs_info, "Missing references."); +- ret = -EIO; ++ btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0", ++ bytenr); ++ ret = -EUCLEAN; + goto out_unlock; + } + *lookup_info = 0; +@@ -5487,7 +5500,12 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans, + path->locks[level] = 0; + return ret; + } +- BUG_ON(wc->refs[level] == 0); ++ if (unlikely(wc->refs[level] == 0)) { ++ btrfs_tree_unlock_rw(eb, path->locks[level]); ++ btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0", ++ eb->start); ++ return -EUCLEAN; ++ } + if (wc->refs[level] == 1) { + btrfs_tree_unlock_rw(eb, path->locks[level]); + path->locks[level] = 0; +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c +index 952cf145c6295..15fd8c00f4c08 100644 +--- a/fs/btrfs/file.c ++++ b/fs/btrfs/file.c +@@ -1543,13 +1543,6 @@ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from) + if (IS_ERR_OR_NULL(dio)) { + err = PTR_ERR_OR_ZERO(dio); + } else { +- struct btrfs_file_private stack_private = { 0 }; +- struct btrfs_file_private *private; +- const bool have_private = (file->private_data != NULL); +- +- if (!have_private) +- file->private_data = &stack_private; +- + /* + * If we have a synchoronous write, we must make sure the fsync + * triggered by the iomap_dio_complete() call below doesn't +@@ -1558,13 +1551,10 @@ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from) + * partial writes due to the input buffer (or parts of it) not + * being already faulted in. 
+ */ +- private = file->private_data; +- private->fsync_skip_inode_lock = true; ++ ASSERT(current->journal_info == NULL); ++ current->journal_info = BTRFS_TRANS_DIO_WRITE_STUB; + err = iomap_dio_complete(dio); +- private->fsync_skip_inode_lock = false; +- +- if (!have_private) +- file->private_data = NULL; ++ current->journal_info = NULL; + } + + /* No increment (+=) because iomap returns a cumulative value. */ +@@ -1796,7 +1786,6 @@ static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx) + */ + int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) + { +- struct btrfs_file_private *private = file->private_data; + struct dentry *dentry = file_dentry(file); + struct inode *inode = d_inode(dentry); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); +@@ -1806,7 +1795,13 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) + int ret = 0, err; + u64 len; + bool full_sync; +- const bool skip_ilock = (private ? private->fsync_skip_inode_lock : false); ++ bool skip_ilock = false; ++ ++ if (current->journal_info == BTRFS_TRANS_DIO_WRITE_STUB) { ++ skip_ilock = true; ++ current->journal_info = NULL; ++ lockdep_assert_held(&inode->i_rwsem); ++ } + + trace_btrfs_sync_file(file, datasync); + +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index 18ce5353092d7..a422382118878 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -5668,7 +5668,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) + struct inode *inode; + struct btrfs_root *root = BTRFS_I(dir)->root; + struct btrfs_root *sub_root = root; +- struct btrfs_key location; ++ struct btrfs_key location = { 0 }; + u8 di_type = 0; + int ret = 0; + +diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h +index 238a0ab85df9b..7623db359881e 100644 +--- a/fs/btrfs/transaction.h ++++ b/fs/btrfs/transaction.h +@@ -12,6 +12,12 @@ + #include "ctree.h" + #include "misc.h" + ++/* ++ * Signal that a direct IO write is in progress, to avoid deadlock for sync ++ * direct IO writes when fsync is called during the direct IO write path. ++ */ ++#define BTRFS_TRANS_DIO_WRITE_STUB ((void *) 1) ++ + /* Radix-tree tag for roots that are part of the trasaction. 
*/ + #define BTRFS_ROOT_TRANS_TAG 0 + +diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c +index 5d473e50598f9..f32a91d7c05d9 100644 +--- a/fs/ext4/fast_commit.c ++++ b/fs/ext4/fast_commit.c +@@ -353,7 +353,7 @@ void ext4_fc_mark_ineligible(struct super_block *sb, int reason, handle_t *handl + read_unlock(&sbi->s_journal->j_state_lock); + } + spin_lock(&sbi->s_fc_lock); +- if (sbi->s_fc_ineligible_tid < tid) ++ if (tid_gt(tid, sbi->s_fc_ineligible_tid)) + sbi->s_fc_ineligible_tid = tid; + spin_unlock(&sbi->s_fc_lock); + WARN_ON(reason >= EXT4_FC_REASON_MAX); +@@ -1213,7 +1213,7 @@ int ext4_fc_commit(journal_t *journal, tid_t commit_tid) + if (ret == -EALREADY) { + /* There was an ongoing commit, check if we need to restart */ + if (atomic_read(&sbi->s_fc_subtid) <= subtid && +- commit_tid > journal->j_commit_sequence) ++ tid_gt(commit_tid, journal->j_commit_sequence)) + goto restart_fc; + ext4_fc_update_stats(sb, EXT4_FC_STATUS_SKIPPED, 0, 0, + commit_tid); +@@ -1288,7 +1288,7 @@ static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid) + list_del_init(&iter->i_fc_list); + ext4_clear_inode_state(&iter->vfs_inode, + EXT4_STATE_FC_COMMITTING); +- if (iter->i_sync_tid <= tid) ++ if (tid_geq(tid, iter->i_sync_tid)) + ext4_fc_reset_inode(&iter->vfs_inode); + /* Make sure EXT4_STATE_FC_COMMITTING bit is clear */ + smp_mb(); +@@ -1319,7 +1319,7 @@ static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid) + list_splice_init(&sbi->s_fc_q[FC_Q_STAGING], + &sbi->s_fc_q[FC_Q_MAIN]); + +- if (tid >= sbi->s_fc_ineligible_tid) { ++ if (tid_geq(tid, sbi->s_fc_ineligible_tid)) { + sbi->s_fc_ineligible_tid = 0; + ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE); + } +diff --git a/fs/fscache/main.c b/fs/fscache/main.c +index dad85fd84f6f9..7a60cd96e87ea 100644 +--- a/fs/fscache/main.c ++++ b/fs/fscache/main.c +@@ -114,6 +114,7 @@ static void __exit fscache_exit(void) + + kmem_cache_destroy(fscache_cookie_jar); + fscache_proc_cleanup(); ++ timer_shutdown_sync(&fscache_cookie_lru_timer); + destroy_workqueue(fscache_wq); + pr_notice("Unloaded\n"); + } +diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c +index c1703d2c4bf9a..95f9913a35373 100644 +--- a/fs/fuse/dir.c ++++ b/fs/fuse/dir.c +@@ -668,7 +668,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, + + err = get_create_ext(&args, dir, entry, mode); + if (err) +- goto out_put_forget_req; ++ goto out_free_ff; + + err = fuse_simple_request(fm, &args); + free_ext_value(&args); +diff --git a/fs/fuse/file.c b/fs/fuse/file.c +index cc9651a01351c..ceb9f7d230388 100644 +--- a/fs/fuse/file.c ++++ b/fs/fuse/file.c +@@ -1735,10 +1735,16 @@ __acquires(fi->lock) + fuse_writepage_finish(fm, wpa); + spin_unlock(&fi->lock); + +- /* After fuse_writepage_finish() aux request list is private */ ++ /* After rb_erase() aux request list is private */ + for (aux = wpa->next; aux; aux = next) { ++ struct backing_dev_info *bdi = inode_to_bdi(aux->inode); ++ + next = aux->next; + aux->next = NULL; ++ ++ dec_wb_stat(&bdi->wb, WB_WRITEBACK); ++ dec_node_page_state(aux->ia.ap.pages[0], NR_WRITEBACK_TEMP); ++ wb_writeout_inc(&bdi->wb); + fuse_writepage_free(aux); + } + +diff --git a/fs/fuse/xattr.c b/fs/fuse/xattr.c +index 49c01559580f4..690b9aadceaa8 100644 +--- a/fs/fuse/xattr.c ++++ b/fs/fuse/xattr.c +@@ -81,7 +81,7 @@ ssize_t fuse_getxattr(struct inode *inode, const char *name, void *value, + } + ret = fuse_simple_request(fm, &args); + if (!ret && !size) +- ret = min_t(ssize_t, outarg.size, XATTR_SIZE_MAX); ++ ret = min_t(size_t, 
outarg.size, XATTR_SIZE_MAX); + if (ret == -ENOSYS) { + fm->fc->no_getxattr = 1; + ret = -EOPNOTSUPP; +@@ -143,7 +143,7 @@ ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size) + } + ret = fuse_simple_request(fm, &args); + if (!ret && !size) +- ret = min_t(ssize_t, outarg.size, XATTR_LIST_MAX); ++ ret = min_t(size_t, outarg.size, XATTR_LIST_MAX); + if (ret > 0 && size) + ret = fuse_verify_xattr_list(list, ret); + if (ret == -ENOSYS) { +diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c +index 5b771a3d8d9ae..421c0d360836e 100644 +--- a/fs/jbd2/recovery.c ++++ b/fs/jbd2/recovery.c +@@ -448,6 +448,27 @@ static int jbd2_commit_block_csum_verify(journal_t *j, void *buf) + return provided == cpu_to_be32(calculated); + } + ++static bool jbd2_commit_block_csum_verify_partial(journal_t *j, void *buf) ++{ ++ struct commit_header *h; ++ __be32 provided; ++ __u32 calculated; ++ void *tmpbuf; ++ ++ tmpbuf = kzalloc(j->j_blocksize, GFP_KERNEL); ++ if (!tmpbuf) ++ return false; ++ ++ memcpy(tmpbuf, buf, sizeof(struct commit_header)); ++ h = tmpbuf; ++ provided = h->h_chksum[0]; ++ h->h_chksum[0] = 0; ++ calculated = jbd2_chksum(j, j->j_csum_seed, tmpbuf, j->j_blocksize); ++ kfree(tmpbuf); ++ ++ return provided == cpu_to_be32(calculated); ++} ++ + static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag, + journal_block_tag3_t *tag3, + void *buf, __u32 sequence) +@@ -814,6 +835,13 @@ static int do_one_pass(journal_t *journal, + if (pass == PASS_SCAN && + !jbd2_commit_block_csum_verify(journal, + bh->b_data)) { ++ if (jbd2_commit_block_csum_verify_partial( ++ journal, ++ bh->b_data)) { ++ pr_notice("JBD2: Find incomplete commit block in transaction %u block %lu\n", ++ next_commit_ID, next_log_block); ++ goto chksum_ok; ++ } + chksum_error: + if (commit_time < last_trans_commit_time) + goto ignore_crc_mismatch; +@@ -828,6 +856,7 @@ static int do_one_pass(journal_t *journal, + } + } + if (pass == PASS_SCAN) { ++ chksum_ok: + last_trans_commit_time = commit_time; + head_block = next_log_block; + } +@@ -847,6 +876,7 @@ static int do_one_pass(journal_t *journal, + next_log_block); + need_check_commit_time = true; + } ++ + /* If we aren't in the REVOKE pass, then we can + * just skip over this block. 
*/ + if (pass != PASS_REVOKE) { +diff --git a/fs/nfs/super.c b/fs/nfs/super.c +index 0d6473cb00cb3..f63513e477c50 100644 +--- a/fs/nfs/super.c ++++ b/fs/nfs/super.c +@@ -47,6 +47,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -223,6 +224,7 @@ static int __nfs_list_for_each_server(struct list_head *head, + ret = fn(server, data); + if (ret) + goto out; ++ cond_resched(); + rcu_read_lock(); + } + rcu_read_unlock(); +diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c +index a9b8d77c8c1d5..ce30b51ac593c 100644 +--- a/fs/nilfs2/recovery.c ++++ b/fs/nilfs2/recovery.c +@@ -708,6 +708,33 @@ static void nilfs_finish_roll_forward(struct the_nilfs *nilfs, + brelse(bh); + } + ++/** ++ * nilfs_abort_roll_forward - cleaning up after a failed rollforward recovery ++ * @nilfs: nilfs object ++ */ ++static void nilfs_abort_roll_forward(struct the_nilfs *nilfs) ++{ ++ struct nilfs_inode_info *ii, *n; ++ LIST_HEAD(head); ++ ++ /* Abandon inodes that have read recovery data */ ++ spin_lock(&nilfs->ns_inode_lock); ++ list_splice_init(&nilfs->ns_dirty_files, &head); ++ spin_unlock(&nilfs->ns_inode_lock); ++ if (list_empty(&head)) ++ return; ++ ++ set_nilfs_purging(nilfs); ++ list_for_each_entry_safe(ii, n, &head, i_dirty) { ++ spin_lock(&nilfs->ns_inode_lock); ++ list_del_init(&ii->i_dirty); ++ spin_unlock(&nilfs->ns_inode_lock); ++ ++ iput(&ii->vfs_inode); ++ } ++ clear_nilfs_purging(nilfs); ++} ++ + /** + * nilfs_salvage_orphan_logs - salvage logs written after the latest checkpoint + * @nilfs: nilfs object +@@ -766,15 +793,19 @@ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs, + if (unlikely(err)) { + nilfs_err(sb, "error %d writing segment for recovery", + err); +- goto failed; ++ goto put_root; + } + + nilfs_finish_roll_forward(nilfs, ri); + } + +- failed: ++put_root: + nilfs_put_root(root); + return err; ++ ++failed: ++ nilfs_abort_roll_forward(nilfs); ++ goto put_root; + } + + /** +diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c +index e10f8a777ab06..0610cb12c11ca 100644 +--- a/fs/nilfs2/segment.c ++++ b/fs/nilfs2/segment.c +@@ -1835,6 +1835,9 @@ static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci, + nilfs_abort_logs(&logs, ret ? 
: err); + + list_splice_tail_init(&sci->sc_segbufs, &logs); ++ if (list_empty(&logs)) ++ return; /* if the first segment buffer preparation failed */ ++ + nilfs_cancel_segusage(&logs, nilfs->ns_sufile); + nilfs_free_incomplete_logs(&logs, nilfs); + +@@ -2079,7 +2082,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode) + + err = nilfs_segctor_begin_construction(sci, nilfs); + if (unlikely(err)) +- goto out; ++ goto failed; + + /* Update time stamp */ + sci->sc_seg_ctime = ktime_get_real_seconds(); +@@ -2142,10 +2145,9 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode) + return err; + + failed_to_write: +- if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED) +- nilfs_redirty_inodes(&sci->sc_dirty_files); +- + failed: ++ if (mode == SC_LSEG_SR && nilfs_sc_cstage_get(sci) >= NILFS_ST_IFILE) ++ nilfs_redirty_inodes(&sci->sc_dirty_files); + if (nilfs_doing_gc()) + nilfs_redirty_inodes(&sci->sc_gc_inodes); + nilfs_segctor_abort_construction(sci, nilfs, err); +diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c +index 379d22e28ed62..905c7eadf9676 100644 +--- a/fs/nilfs2/sysfs.c ++++ b/fs/nilfs2/sysfs.c +@@ -836,9 +836,15 @@ ssize_t nilfs_dev_revision_show(struct nilfs_dev_attr *attr, + struct the_nilfs *nilfs, + char *buf) + { +- struct nilfs_super_block **sbp = nilfs->ns_sbp; +- u32 major = le32_to_cpu(sbp[0]->s_rev_level); +- u16 minor = le16_to_cpu(sbp[0]->s_minor_rev_level); ++ struct nilfs_super_block *raw_sb; ++ u32 major; ++ u16 minor; ++ ++ down_read(&nilfs->ns_sem); ++ raw_sb = nilfs->ns_sbp[0]; ++ major = le32_to_cpu(raw_sb->s_rev_level); ++ minor = le16_to_cpu(raw_sb->s_minor_rev_level); ++ up_read(&nilfs->ns_sem); + + return sysfs_emit(buf, "%d.%d\n", major, minor); + } +@@ -856,8 +862,13 @@ ssize_t nilfs_dev_device_size_show(struct nilfs_dev_attr *attr, + struct the_nilfs *nilfs, + char *buf) + { +- struct nilfs_super_block **sbp = nilfs->ns_sbp; +- u64 dev_size = le64_to_cpu(sbp[0]->s_dev_size); ++ struct nilfs_super_block *raw_sb; ++ u64 dev_size; ++ ++ down_read(&nilfs->ns_sem); ++ raw_sb = nilfs->ns_sbp[0]; ++ dev_size = le64_to_cpu(raw_sb->s_dev_size); ++ up_read(&nilfs->ns_sem); + + return sysfs_emit(buf, "%llu\n", dev_size); + } +@@ -879,9 +890,15 @@ ssize_t nilfs_dev_uuid_show(struct nilfs_dev_attr *attr, + struct the_nilfs *nilfs, + char *buf) + { +- struct nilfs_super_block **sbp = nilfs->ns_sbp; ++ struct nilfs_super_block *raw_sb; ++ ssize_t len; + +- return sysfs_emit(buf, "%pUb\n", sbp[0]->s_uuid); ++ down_read(&nilfs->ns_sem); ++ raw_sb = nilfs->ns_sbp[0]; ++ len = sysfs_emit(buf, "%pUb\n", raw_sb->s_uuid); ++ up_read(&nilfs->ns_sem); ++ ++ return len; + } + + static +@@ -889,10 +906,16 @@ ssize_t nilfs_dev_volume_name_show(struct nilfs_dev_attr *attr, + struct the_nilfs *nilfs, + char *buf) + { +- struct nilfs_super_block **sbp = nilfs->ns_sbp; ++ struct nilfs_super_block *raw_sb; ++ ssize_t len; ++ ++ down_read(&nilfs->ns_sem); ++ raw_sb = nilfs->ns_sbp[0]; ++ len = scnprintf(buf, sizeof(raw_sb->s_volume_name), "%s\n", ++ raw_sb->s_volume_name); ++ up_read(&nilfs->ns_sem); + +- return scnprintf(buf, sizeof(sbp[0]->s_volume_name), "%s\n", +- sbp[0]->s_volume_name); ++ return len; + } + + static const char dev_readme_str[] = +diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c +index 9d0a09f00b384..e1b856ecce61d 100644 +--- a/fs/ntfs3/dir.c ++++ b/fs/ntfs3/dir.c +@@ -272,9 +272,12 @@ struct inode *dir_search_u(struct inode *dir, const struct cpu_str *uni, + return err == -ENOENT ? NULL : err ? 
ERR_PTR(err) : inode; + } + +-static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni, +- const struct NTFS_DE *e, u8 *name, +- struct dir_context *ctx) ++/* ++ * returns false if 'ctx' if full ++ */ ++static inline bool ntfs_dir_emit(struct ntfs_sb_info *sbi, ++ struct ntfs_inode *ni, const struct NTFS_DE *e, ++ u8 *name, struct dir_context *ctx) + { + const struct ATTR_FILE_NAME *fname; + unsigned long ino; +@@ -284,29 +287,29 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni, + fname = Add2Ptr(e, sizeof(struct NTFS_DE)); + + if (fname->type == FILE_NAME_DOS) +- return 0; ++ return true; + + if (!mi_is_ref(&ni->mi, &fname->home)) +- return 0; ++ return true; + + ino = ino_get(&e->ref); + + if (ino == MFT_REC_ROOT) +- return 0; ++ return true; + + /* Skip meta files. Unless option to show metafiles is set. */ + if (!sbi->options->showmeta && ntfs_is_meta_file(sbi, ino)) +- return 0; ++ return true; + + if (sbi->options->nohidden && (fname->dup.fa & FILE_ATTRIBUTE_HIDDEN)) +- return 0; ++ return true; + + name_len = ntfs_utf16_to_nls(sbi, fname->name, fname->name_len, name, + PATH_MAX); + if (name_len <= 0) { + ntfs_warn(sbi->sb, "failed to convert name for inode %lx.", + ino); +- return 0; ++ return true; + } + + /* +@@ -336,17 +339,20 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni, + } + } + +- return !dir_emit(ctx, (s8 *)name, name_len, ino, dt_type); ++ return dir_emit(ctx, (s8 *)name, name_len, ino, dt_type); + } + + /* + * ntfs_read_hdr - Helper function for ntfs_readdir(). ++ * ++ * returns 0 if ok. ++ * returns -EINVAL if directory is corrupted. ++ * returns +1 if 'ctx' is full. + */ + static int ntfs_read_hdr(struct ntfs_sb_info *sbi, struct ntfs_inode *ni, + const struct INDEX_HDR *hdr, u64 vbo, u64 pos, + u8 *name, struct dir_context *ctx) + { +- int err; + const struct NTFS_DE *e; + u32 e_size; + u32 end = le32_to_cpu(hdr->used); +@@ -354,12 +360,12 @@ static int ntfs_read_hdr(struct ntfs_sb_info *sbi, struct ntfs_inode *ni, + + for (;; off += e_size) { + if (off + sizeof(struct NTFS_DE) > end) +- return -1; ++ return -EINVAL; + + e = Add2Ptr(hdr, off); + e_size = le16_to_cpu(e->size); + if (e_size < sizeof(struct NTFS_DE) || off + e_size > end) +- return -1; ++ return -EINVAL; + + if (de_is_last(e)) + return 0; +@@ -369,14 +375,15 @@ static int ntfs_read_hdr(struct ntfs_sb_info *sbi, struct ntfs_inode *ni, + continue; + + if (le16_to_cpu(e->key_size) < SIZEOF_ATTRIBUTE_FILENAME) +- return -1; ++ return -EINVAL; + + ctx->pos = vbo + off; + + /* Submit the name to the filldir callback. */ +- err = ntfs_filldir(sbi, ni, e, name, ctx); +- if (err) +- return err; ++ if (!ntfs_dir_emit(sbi, ni, e, name, ctx)) { ++ /* ctx is full. */ ++ return +1; ++ } + } + } + +@@ -475,8 +482,6 @@ static int ntfs_readdir(struct file *file, struct dir_context *ctx) + + vbo = (u64)bit << index_bits; + if (vbo >= i_size) { +- ntfs_inode_err(dir, "Looks like your dir is corrupt"); +- ctx->pos = eod; + err = -EINVAL; + goto out; + } +@@ -499,9 +504,16 @@ static int ntfs_readdir(struct file *file, struct dir_context *ctx) + __putname(name); + put_indx_node(node); + +- if (err == -ENOENT) { ++ if (err == 1) { ++ /* 'ctx' is full. 
*/ ++ err = 0; ++ } else if (err == -ENOENT) { + err = 0; + ctx->pos = pos; ++ } else if (err < 0) { ++ if (err == -EINVAL) ++ ntfs_inode_err(dir, "directory corrupted"); ++ ctx->pos = eod; + } + + return err; +diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c +index 45b687aff700b..f7c381730b396 100644 +--- a/fs/ntfs3/frecord.c ++++ b/fs/ntfs3/frecord.c +@@ -1601,8 +1601,10 @@ int ni_delete_all(struct ntfs_inode *ni) + asize = le32_to_cpu(attr->size); + roff = le16_to_cpu(attr->nres.run_off); + +- if (roff > asize) ++ if (roff > asize) { ++ _ntfs_bad_inode(&ni->vfs_inode); + return -EINVAL; ++ } + + /* run==1 means unpack and deallocate. */ + run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn, +diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c +index 15cbfec4c28c7..dd8acd2077521 100644 +--- a/fs/smb/client/smb2inode.c ++++ b/fs/smb/client/smb2inode.c +@@ -1106,6 +1106,8 @@ int smb2_rename_path(const unsigned int xid, + co, DELETE, SMB2_OP_RENAME, cfile, source_dentry); + if (rc == -EINVAL) { + cifs_dbg(FYI, "invalid lease key, resending request without lease"); ++ cifs_get_writable_path(tcon, from_name, ++ FIND_WR_WITH_DELETE, &cfile); + rc = smb2_set_path_attr(xid, tcon, from_name, to_name, cifs_sb, + co, DELETE, SMB2_OP_RENAME, cfile, NULL); + } +@@ -1149,6 +1151,7 @@ smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon, + cfile, NULL, NULL, dentry); + if (rc == -EINVAL) { + cifs_dbg(FYI, "invalid lease key, resending request without lease"); ++ cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile); + rc = smb2_compound_op(xid, tcon, cifs_sb, + full_path, &oparms, &in_iov, + &(int){SMB2_OP_SET_EOF}, 1, +diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c +index 012d6ec12a691..acd5d7d793525 100644 +--- a/fs/smb/client/smb2ops.c ++++ b/fs/smb/client/smb2ops.c +@@ -3186,13 +3186,15 @@ static long smb3_zero_data(struct file *file, struct cifs_tcon *tcon, + } + + static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, +- loff_t offset, loff_t len, bool keep_size) ++ unsigned long long offset, unsigned long long len, ++ bool keep_size) + { + struct cifs_ses *ses = tcon->ses; + struct inode *inode = file_inode(file); + struct cifsInodeInfo *cifsi = CIFS_I(inode); + struct cifsFileInfo *cfile = file->private_data; +- unsigned long long new_size; ++ struct netfs_inode *ictx = netfs_inode(inode); ++ unsigned long long i_size, new_size, remote_size; + long rc; + unsigned int xid; + +@@ -3204,6 +3206,16 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, + inode_lock(inode); + filemap_invalidate_lock(inode->i_mapping); + ++ i_size = i_size_read(inode); ++ remote_size = ictx->remote_i_size; ++ if (offset + len >= remote_size && offset < i_size) { ++ unsigned long long top = umin(offset + len, i_size); ++ ++ rc = filemap_write_and_wait_range(inode->i_mapping, offset, top - 1); ++ if (rc < 0) ++ goto zero_range_exit; ++ } ++ + /* + * We zero the range through ioctl, so we need remove the page caches + * first, otherwise the data may be inconsistent with the server. 
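A minimal standalone sketch of the range clamp smb3_zero_range() gains in the hunk above, assuming umin_ull() stands in for the kernel's umin() and the printf marks where filemap_write_and_wait_range() would run; this is illustration only, not part of the applied patch:

#include <stdio.h>

static unsigned long long umin_ull(unsigned long long a,
				   unsigned long long b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Example geometry: the zeroed range reaches past the server's size. */
	unsigned long long offset = 4096, len = 1048576;
	unsigned long long i_size = 524288;	 /* local inode size */
	unsigned long long remote_size = 262144; /* server's view of the size */

	if (offset + len >= remote_size && offset < i_size) {
		unsigned long long top = umin_ull(offset + len, i_size);

		/* kernel: filemap_write_and_wait_range(mapping, offset, top - 1) */
		printf("write back dirty pages in [%llu, %llu)\n", offset, top);
	} else {
		printf("no writeback needed before zeroing\n");
	}
	return 0;
}

The end of the flush is clamped to i_size because pages beyond it cannot be dirty, and filemap_write_and_wait_range() takes an inclusive last byte, hence the patch's top - 1.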
+diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c +index a8f52c4ebbdad..e546ffa57b55a 100644 +--- a/fs/smb/server/oplock.c ++++ b/fs/smb/server/oplock.c +@@ -1510,7 +1510,7 @@ void create_lease_buf(u8 *rbuf, struct lease *lease) + * parse_lease_state() - parse lease context containted in file open request + * @open_req: buffer containing smb2 file open(create) request + * +- * Return: oplock state, -ENOENT if create lease context not found ++ * Return: allocated lease context object on success, otherwise NULL + */ + struct lease_ctx_info *parse_lease_state(void *open_req) + { +diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c +index da57adc2bd689..458cc736286aa 100644 +--- a/fs/smb/server/smb2pdu.c ++++ b/fs/smb/server/smb2pdu.c +@@ -1687,6 +1687,8 @@ int smb2_sess_setup(struct ksmbd_work *work) + rc = ksmbd_session_register(conn, sess); + if (rc) + goto out_err; ++ ++ conn->binding = false; + } else if (conn->dialect >= SMB30_PROT_ID && + (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL) && + req->Flags & SMB2_SESSION_REQ_FLAG_BINDING) { +@@ -1765,6 +1767,8 @@ int smb2_sess_setup(struct ksmbd_work *work) + sess = NULL; + goto out_err; + } ++ ++ conn->binding = false; + } + work->sess = sess; + +@@ -2767,8 +2771,8 @@ static int parse_durable_handle_context(struct ksmbd_work *work, + } + } + +- if (((lc && (lc->req_state & SMB2_LEASE_HANDLE_CACHING_LE)) || +- req_op_level == SMB2_OPLOCK_LEVEL_BATCH)) { ++ if ((lc && (lc->req_state & SMB2_LEASE_HANDLE_CACHING_LE)) || ++ req_op_level == SMB2_OPLOCK_LEVEL_BATCH) { + dh_info->CreateGuid = + durable_v2_blob->CreateGuid; + dh_info->persistent = +@@ -2788,8 +2792,8 @@ static int parse_durable_handle_context(struct ksmbd_work *work, + goto out; + } + +- if (((lc && (lc->req_state & SMB2_LEASE_HANDLE_CACHING_LE)) || +- req_op_level == SMB2_OPLOCK_LEVEL_BATCH)) { ++ if ((lc && (lc->req_state & SMB2_LEASE_HANDLE_CACHING_LE)) || ++ req_op_level == SMB2_OPLOCK_LEVEL_BATCH) { + ksmbd_debug(SMB, "Request for durable open\n"); + dh_info->type = dh_idx; + } +@@ -3411,7 +3415,7 @@ int smb2_open(struct ksmbd_work *work) + goto err_out1; + } + } else { +- if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE) { ++ if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE && lc) { + if (S_ISDIR(file_inode(filp)->i_mode)) { + lc->req_state &= ~SMB2_LEASE_WRITE_CACHING_LE; + lc->is_dir = true; +diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c +index 6633fa78e9b96..2ce7f75059cb3 100644 +--- a/fs/smb/server/transport_tcp.c ++++ b/fs/smb/server/transport_tcp.c +@@ -624,8 +624,10 @@ int ksmbd_tcp_set_interfaces(char *ifc_list, int ifc_list_sz) + for_each_netdev(&init_net, netdev) { + if (netif_is_bridge_port(netdev)) + continue; +- if (!alloc_iface(kstrdup(netdev->name, GFP_KERNEL))) ++ if (!alloc_iface(kstrdup(netdev->name, GFP_KERNEL))) { ++ rtnl_unlock(); + return -ENOMEM; ++ } + } + rtnl_unlock(); + bind_additional_ifaces = 1; +diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c +index 16bd693d0b3aa..d5918eba27e37 100644 +--- a/fs/squashfs/inode.c ++++ b/fs/squashfs/inode.c +@@ -279,8 +279,13 @@ int squashfs_read_inode(struct inode *inode, long long ino) + if (err < 0) + goto failed_read; + +- set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); + inode->i_size = le32_to_cpu(sqsh_ino->symlink_size); ++ if (inode->i_size > PAGE_SIZE) { ++ ERROR("Corrupted symlink\n"); ++ return -EINVAL; ++ } ++ ++ set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); + inode->i_op = &squashfs_symlink_inode_ops; + inode_nohighmem(inode); + 
inode->i_data.a_ops = &squashfs_symlink_aops; +diff --git a/fs/tracefs/event_inode.c b/fs/tracefs/event_inode.c +index aa54be1ce1242..73a08db3b7a85 100644 +--- a/fs/tracefs/event_inode.c ++++ b/fs/tracefs/event_inode.c +@@ -935,7 +935,7 @@ static void eventfs_remove_rec(struct eventfs_inode *ei, int level) + list_for_each_entry(ei_child, &ei->children, list) + eventfs_remove_rec(ei_child, level + 1); + +- list_del(&ei->list); ++ list_del_rcu(&ei->list); + free_ei(ei); + } + +diff --git a/fs/udf/super.c b/fs/udf/super.c +index e0080fda2526b..3c78535f406b0 100644 +--- a/fs/udf/super.c ++++ b/fs/udf/super.c +@@ -1080,12 +1080,19 @@ static int udf_fill_partdesc_info(struct super_block *sb, + struct udf_part_map *map; + struct udf_sb_info *sbi = UDF_SB(sb); + struct partitionHeaderDesc *phd; ++ u32 sum; + int err; + + map = &sbi->s_partmaps[p_index]; + + map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */ + map->s_partition_root = le32_to_cpu(p->partitionStartingLocation); ++ if (check_add_overflow(map->s_partition_root, map->s_partition_len, ++ &sum)) { ++ udf_err(sb, "Partition %d has invalid location %u + %u\n", ++ p_index, map->s_partition_root, map->s_partition_len); ++ return -EFSCORRUPTED; ++ } + + if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY)) + map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY; +@@ -1141,6 +1148,14 @@ static int udf_fill_partdesc_info(struct super_block *sb, + bitmap->s_extPosition = le32_to_cpu( + phd->unallocSpaceBitmap.extPosition); + map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP; ++ /* Check whether math over bitmap won't overflow. */ ++ if (check_add_overflow(map->s_partition_len, ++ sizeof(struct spaceBitmapDesc) << 3, ++ &sum)) { ++ udf_err(sb, "Partition %d is too long (%u)\n", p_index, ++ map->s_partition_len); ++ return -EFSCORRUPTED; ++ } + udf_debug("unallocSpaceBitmap (part %d) @ %u\n", + p_index, bitmap->s_extPosition); + } +diff --git a/fs/xattr.c b/fs/xattr.c +index efd4736bc94b0..c20046548f218 100644 +--- a/fs/xattr.c ++++ b/fs/xattr.c +@@ -631,10 +631,9 @@ int do_setxattr(struct mnt_idmap *idmap, struct dentry *dentry, + ctx->kvalue, ctx->size, ctx->flags); + } + +-static long +-setxattr(struct mnt_idmap *idmap, struct dentry *d, +- const char __user *name, const void __user *value, size_t size, +- int flags) ++static int path_setxattr(const char __user *pathname, ++ const char __user *name, const void __user *value, ++ size_t size, int flags, unsigned int lookup_flags) + { + struct xattr_name kname; + struct xattr_ctx ctx = { +@@ -644,33 +643,20 @@ setxattr(struct mnt_idmap *idmap, struct dentry *d, + .kname = &kname, + .flags = flags, + }; ++ struct path path; + int error; + + error = setxattr_copy(name, &ctx); + if (error) + return error; + +- error = do_setxattr(idmap, d, &ctx); +- +- kvfree(ctx.kvalue); +- return error; +-} +- +-static int path_setxattr(const char __user *pathname, +- const char __user *name, const void __user *value, +- size_t size, int flags, unsigned int lookup_flags) +-{ +- struct path path; +- int error; +- + retry: + error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path); + if (error) +- return error; ++ goto out; + error = mnt_want_write(path.mnt); + if (!error) { +- error = setxattr(mnt_idmap(path.mnt), path.dentry, name, +- value, size, flags); ++ error = do_setxattr(mnt_idmap(path.mnt), path.dentry, &ctx); + mnt_drop_write(path.mnt); + } + path_put(&path); +@@ -678,6 +664,9 @@ static int path_setxattr(const char __user *pathname, + lookup_flags |= LOOKUP_REVAL; + goto retry; 
+ } ++ ++out: ++ kvfree(ctx.kvalue); + return error; + } + +@@ -698,20 +687,32 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname, + SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name, + const void __user *,value, size_t, size, int, flags) + { +- struct fd f = fdget(fd); +- int error = -EBADF; ++ struct xattr_name kname; ++ struct xattr_ctx ctx = { ++ .cvalue = value, ++ .kvalue = NULL, ++ .size = size, ++ .kname = &kname, ++ .flags = flags, ++ }; ++ int error; + ++ CLASS(fd, f)(fd); + if (!f.file) +- return error; ++ return -EBADF; ++ + audit_file(f.file); ++ error = setxattr_copy(name, &ctx); ++ if (error) ++ return error; ++ + error = mnt_want_write_file(f.file); + if (!error) { +- error = setxattr(file_mnt_idmap(f.file), +- f.file->f_path.dentry, name, +- value, size, flags); ++ error = do_setxattr(file_mnt_idmap(f.file), ++ f.file->f_path.dentry, &ctx); + mnt_drop_write_file(f.file); + } +- fdput(f); ++ kvfree(ctx.kvalue); + return error; + } + +@@ -900,9 +901,17 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size) + * Extended attribute REMOVE operations + */ + static long +-removexattr(struct mnt_idmap *idmap, struct dentry *d, +- const char __user *name) ++removexattr(struct mnt_idmap *idmap, struct dentry *d, const char *name) + { ++ if (is_posix_acl_xattr(name)) ++ return vfs_remove_acl(idmap, d, name); ++ return vfs_removexattr(idmap, d, name); ++} ++ ++static int path_removexattr(const char __user *pathname, ++ const char __user *name, unsigned int lookup_flags) ++{ ++ struct path path; + int error; + char kname[XATTR_NAME_MAX + 1]; + +@@ -911,25 +920,13 @@ removexattr(struct mnt_idmap *idmap, struct dentry *d, + error = -ERANGE; + if (error < 0) + return error; +- +- if (is_posix_acl_xattr(kname)) +- return vfs_remove_acl(idmap, d, kname); +- +- return vfs_removexattr(idmap, d, kname); +-} +- +-static int path_removexattr(const char __user *pathname, +- const char __user *name, unsigned int lookup_flags) +-{ +- struct path path; +- int error; + retry: + error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path); + if (error) + return error; + error = mnt_want_write(path.mnt); + if (!error) { +- error = removexattr(mnt_idmap(path.mnt), path.dentry, name); ++ error = removexattr(mnt_idmap(path.mnt), path.dentry, kname); + mnt_drop_write(path.mnt); + } + path_put(&path); +@@ -955,15 +952,23 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname, + SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name) + { + struct fd f = fdget(fd); ++ char kname[XATTR_NAME_MAX + 1]; + int error = -EBADF; + + if (!f.file) + return error; + audit_file(f.file); ++ ++ error = strncpy_from_user(kname, name, sizeof(kname)); ++ if (error == 0 || error == sizeof(kname)) ++ error = -ERANGE; ++ if (error < 0) ++ return error; ++ + error = mnt_want_write_file(f.file); + if (!error) { + error = removexattr(file_mnt_idmap(f.file), +- f.file->f_path.dentry, name); ++ f.file->f_path.dentry, kname); + mnt_drop_write_file(f.file); + } + fdput(f); +diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h +index 6126c977ece04..c0b69ffe7bdb4 100644 +--- a/include/acpi/cppc_acpi.h ++++ b/include/acpi/cppc_acpi.h +@@ -139,6 +139,7 @@ struct cppc_cpudata { + #ifdef CONFIG_ACPI_CPPC_LIB + extern int cppc_get_desired_perf(int cpunum, u64 *desired_perf); + extern int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf); ++extern int cppc_get_highest_perf(int cpunum, u64 *highest_perf); + extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs 
*perf_fb_ctrs); + extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls); + extern int cppc_set_enable(int cpu, bool enable); +@@ -165,6 +166,10 @@ static inline int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf) + { + return -ENOTSUPP; + } ++static inline int cppc_get_highest_perf(int cpunum, u64 *highest_perf) ++{ ++ return -ENOTSUPP; ++} + static inline int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs) + { + return -ENOTSUPP; +diff --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h +index 6ad02ad9c7b42..68fc1bd8d851e 100644 +--- a/include/linux/amd-pstate.h ++++ b/include/linux/amd-pstate.h +@@ -52,6 +52,9 @@ struct amd_aperf_mperf { + * @prev: Last Aperf/Mperf/tsc count value read from register + * @freq: current cpu frequency value + * @boost_supported: check whether the Processor or SBIOS supports boost mode ++ * @hw_prefcore: check whether HW supports preferred core featue. ++ * Only when hw_prefcore and early prefcore param are true, ++ * AMD P-State driver supports preferred core featue. + * @epp_policy: Last saved policy used to set energy-performance preference + * @epp_cached: Cached CPPC energy-performance preference value + * @policy: Cpufreq policy value +@@ -85,6 +88,7 @@ struct amd_cpudata { + + u64 freq; + bool boost_supported; ++ bool hw_prefcore; + + /* EPP feature related attributes*/ + s16 epp_policy; +diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h +index 31561e7897157..d4f2c8706042c 100644 +--- a/include/linux/bpf-cgroup.h ++++ b/include/linux/bpf-cgroup.h +@@ -138,11 +138,12 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, + enum cgroup_bpf_attach_type atype); + + int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level, +- int *optname, char __user *optval, ++ int *optname, sockptr_t optval, + int *optlen, char **kernel_optval); ++ + int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, +- int optname, char __user *optval, +- int __user *optlen, int max_optlen, ++ int optname, sockptr_t optval, ++ sockptr_t optlen, int max_optlen, + int retval); + + int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level, +@@ -374,14 +375,6 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk, + __ret; \ + }) + +-#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \ +-({ \ +- int __ret = 0; \ +- if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \ +- get_user(__ret, optlen); \ +- __ret; \ +-}) +- + #define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen, \ + max_optlen, retval) \ + ({ \ +@@ -499,7 +492,6 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, + #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; }) + #define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; }) + #define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; }) +-#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; }) + #define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \ + optlen, max_optlen, retval) ({ retval; }) + #define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \ +diff --git a/include/linux/mm.h b/include/linux/mm.h +index 3d617d0d69675..830b925c2d005 100644 +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -95,6 +95,10 @@ extern const int mmap_rnd_compat_bits_max; + extern int mmap_rnd_compat_bits __read_mostly; + #endif + ++#ifndef PHYSMEM_END ++# define PHYSMEM_END ((1ULL << MAX_PHYSMEM_BITS) - 1) ++#endif ++ + #include + #include + +diff 
--git a/include/linux/pgtable.h b/include/linux/pgtable.h +index af7639c3b0a3a..8b7daccd11bef 100644 +--- a/include/linux/pgtable.h ++++ b/include/linux/pgtable.h +@@ -292,6 +292,27 @@ static inline pmd_t pmdp_get(pmd_t *pmdp) + } + #endif + ++#ifndef pudp_get ++static inline pud_t pudp_get(pud_t *pudp) ++{ ++ return READ_ONCE(*pudp); ++} ++#endif ++ ++#ifndef p4dp_get ++static inline p4d_t p4dp_get(p4d_t *p4dp) ++{ ++ return READ_ONCE(*p4dp); ++} ++#endif ++ ++#ifndef pgdp_get ++static inline pgd_t pgdp_get(pgd_t *pgdp) ++{ ++ return READ_ONCE(*pgdp); ++} ++#endif ++ + #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG + static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, +diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h +index 2c526c8d10cc4..25d0684d37b3e 100644 +--- a/include/linux/regulator/consumer.h ++++ b/include/linux/regulator/consumer.h +@@ -489,6 +489,14 @@ static inline int of_regulator_bulk_get_all(struct device *dev, struct device_no + return 0; + } + ++static inline int devm_regulator_bulk_get_const( ++ struct device *dev, int num_consumers, ++ const struct regulator_bulk_data *in_consumers, ++ struct regulator_bulk_data **out_consumers) ++{ ++ return 0; ++} ++ + static inline int regulator_bulk_enable(int num_consumers, + struct regulator_bulk_data *consumers) + { +diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h +index d2a280a42f3b8..2129d071c3725 100644 +--- a/include/net/bluetooth/hci.h ++++ b/include/net/bluetooth/hci.h +@@ -430,6 +430,7 @@ enum { + #define HCI_NCMD_TIMEOUT msecs_to_jiffies(4000) /* 4 seconds */ + #define HCI_ACL_TX_TIMEOUT msecs_to_jiffies(45000) /* 45 seconds */ + #define HCI_AUTO_OFF_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */ ++#define HCI_ACL_CONN_TIMEOUT msecs_to_jiffies(20000) /* 20 seconds */ + #define HCI_LE_CONN_TIMEOUT msecs_to_jiffies(20000) /* 20 seconds */ + #define HCI_LE_AUTOCONN_TIMEOUT msecs_to_jiffies(4000) /* 4 seconds */ + +@@ -644,6 +645,7 @@ enum { + #define HCI_ERROR_PIN_OR_KEY_MISSING 0x06 + #define HCI_ERROR_MEMORY_EXCEEDED 0x07 + #define HCI_ERROR_CONNECTION_TIMEOUT 0x08 ++#define HCI_ERROR_COMMAND_DISALLOWED 0x0c + #define HCI_ERROR_REJ_LIMITED_RESOURCES 0x0d + #define HCI_ERROR_REJ_BAD_ADDR 0x0f + #define HCI_ERROR_INVALID_PARAMETERS 0x12 +@@ -652,6 +654,7 @@ enum { + #define HCI_ERROR_REMOTE_POWER_OFF 0x15 + #define HCI_ERROR_LOCAL_HOST_TERM 0x16 + #define HCI_ERROR_PAIRING_NOT_ALLOWED 0x18 ++#define HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE 0x1e + #define HCI_ERROR_INVALID_LL_PARAMS 0x1e + #define HCI_ERROR_UNSPECIFIED 0x1f + #define HCI_ERROR_ADVERTISING_TIMEOUT 0x3c +diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h +index f89d6d43ba8f1..29f1549ee1114 100644 +--- a/include/net/bluetooth/hci_core.h ++++ b/include/net/bluetooth/hci_core.h +@@ -188,7 +188,6 @@ struct blocked_key { + struct smp_csrk { + bdaddr_t bdaddr; + u8 bdaddr_type; +- u8 link_type; + u8 type; + u8 val[16]; + }; +@@ -198,7 +197,6 @@ struct smp_ltk { + struct rcu_head rcu; + bdaddr_t bdaddr; + u8 bdaddr_type; +- u8 link_type; + u8 authenticated; + u8 type; + u8 enc_size; +@@ -213,7 +211,6 @@ struct smp_irk { + bdaddr_t rpa; + bdaddr_t bdaddr; + u8 addr_type; +- u8 link_type; + u8 val[16]; + }; + +@@ -221,8 +218,6 @@ struct link_key { + struct list_head list; + struct rcu_head rcu; + bdaddr_t bdaddr; +- u8 bdaddr_type; +- u8 link_type; + u8 type; + u8 val[HCI_LINK_KEY_SIZE]; + u8 pin_len; +@@ -1046,6 +1041,24 @@ static inline 
unsigned int hci_conn_count(struct hci_dev *hdev) + return c->acl_num + c->sco_num + c->le_num + c->iso_num; + } + ++static inline bool hci_conn_valid(struct hci_dev *hdev, struct hci_conn *conn) ++{ ++ struct hci_conn_hash *h = &hdev->conn_hash; ++ struct hci_conn *c; ++ ++ rcu_read_lock(); ++ ++ list_for_each_entry_rcu(c, &h->list, list) { ++ if (c == conn) { ++ rcu_read_unlock(); ++ return true; ++ } ++ } ++ rcu_read_unlock(); ++ ++ return false; ++} ++ + static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle) + { + struct hci_conn_hash *h = &hdev->conn_hash; +@@ -1422,7 +1435,6 @@ struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type, + bdaddr_t *dst, u8 role); + void hci_conn_del(struct hci_conn *conn); + void hci_conn_hash_flush(struct hci_dev *hdev); +-void hci_conn_check_pending(struct hci_dev *hdev); + + struct hci_chan *hci_chan_create(struct hci_conn *conn); + void hci_chan_del(struct hci_chan *chan); +@@ -1436,6 +1448,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, + struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, + u8 dst_type, bool dst_resolved, u8 sec_level, + u16 conn_timeout, u8 role); ++void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status); + struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, + u8 sec_level, u8 auth_type, + enum conn_reasons conn_reason); +diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h +index 4cb048bdcb1e4..3cb2d10cac930 100644 +--- a/include/net/bluetooth/hci_sync.h ++++ b/include/net/bluetooth/hci_sync.h +@@ -50,6 +50,22 @@ int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy); + int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy); ++int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, ++ void *data, hci_cmd_sync_work_destroy_t destroy); ++int hci_cmd_sync_run(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, ++ void *data, hci_cmd_sync_work_destroy_t destroy); ++int hci_cmd_sync_run_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, ++ void *data, hci_cmd_sync_work_destroy_t destroy); ++struct hci_cmd_sync_work_entry * ++hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, ++ void *data, hci_cmd_sync_work_destroy_t destroy); ++void hci_cmd_sync_cancel_entry(struct hci_dev *hdev, ++ struct hci_cmd_sync_work_entry *entry); ++bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, ++ void *data, hci_cmd_sync_work_destroy_t destroy); ++bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev, ++ hci_cmd_sync_work_func_t func, void *data, ++ hci_cmd_sync_work_destroy_t destroy); + + int hci_update_eir_sync(struct hci_dev *hdev); + int hci_update_class_sync(struct hci_dev *hdev); +@@ -129,8 +145,6 @@ struct hci_conn; + + int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason); + +-int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn); +- + int hci_le_create_cis_sync(struct hci_dev *hdev); + + int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle); +@@ -140,3 +154,9 @@ int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason); + int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle); + + int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle); ++ ++int hci_connect_acl_sync(struct hci_dev *hdev, struct 
hci_conn *conn); ++ ++int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn); ++ ++int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn); +diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h +index 37ccce56a73e8..28e110f733ffd 100644 +--- a/include/net/mana/mana.h ++++ b/include/net/mana/mana.h +@@ -97,6 +97,8 @@ struct mana_txq { + + atomic_t pending_sends; + ++ bool napi_initialized; ++ + struct mana_stats_tx stats; + }; + +diff --git a/include/net/sock.h b/include/net/sock.h +index 5942b5ff4c786..2a1aee5038482 100644 +--- a/include/net/sock.h ++++ b/include/net/sock.h +@@ -1875,11 +1875,13 @@ int sk_setsockopt(struct sock *sk, int level, int optname, + sockptr_t optval, unsigned int optlen); + int sock_setsockopt(struct socket *sock, int level, int op, + sockptr_t optval, unsigned int optlen); ++int do_sock_setsockopt(struct socket *sock, bool compat, int level, ++ int optname, sockptr_t optval, int optlen); ++int do_sock_getsockopt(struct socket *sock, bool compat, int level, ++ int optname, sockptr_t optval, sockptr_t optlen); + + int sk_getsockopt(struct sock *sk, int level, int optname, + sockptr_t optval, sockptr_t optlen); +-int sock_getsockopt(struct socket *sock, int level, int op, +- char __user *optval, int __user *optlen); + int sock_gettstamp(struct socket *sock, void __user *userstamp, + bool timeval, bool time32); + struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, +diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h +index 8db7fd3f743e4..5eed091d4c291 100644 +--- a/include/uapi/drm/drm_fourcc.h ++++ b/include/uapi/drm/drm_fourcc.h +@@ -1474,6 +1474,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier) + #define AMD_FMT_MOD_TILE_VER_GFX10 2 + #define AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS 3 + #define AMD_FMT_MOD_TILE_VER_GFX11 4 ++#define AMD_FMT_MOD_TILE_VER_GFX12 5 + + /* + * 64K_S is the same for GFX9/GFX10/GFX10_RBPLUS and hence has GFX9 as canonical +@@ -1484,6 +1485,8 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier) + /* + * 64K_D for non-32 bpp is the same for GFX9/GFX10/GFX10_RBPLUS and hence has + * GFX9 as canonical version. ++ * ++ * 64K_D_2D on GFX12 is identical to 64K_D on GFX11. 
+ */ + #define AMD_FMT_MOD_TILE_GFX9_64K_D 10 + #define AMD_FMT_MOD_TILE_GFX9_64K_S_X 25 +@@ -1491,6 +1494,21 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier) + #define AMD_FMT_MOD_TILE_GFX9_64K_R_X 27 + #define AMD_FMT_MOD_TILE_GFX11_256K_R_X 31 + ++/* Gfx12 swizzle modes: ++ * 0 - LINEAR ++ * 1 - 256B_2D - 2D block dimensions ++ * 2 - 4KB_2D ++ * 3 - 64KB_2D ++ * 4 - 256KB_2D ++ * 5 - 4KB_3D - 3D block dimensions ++ * 6 - 64KB_3D ++ * 7 - 256KB_3D ++ */ ++#define AMD_FMT_MOD_TILE_GFX12_256B_2D 1 ++#define AMD_FMT_MOD_TILE_GFX12_4K_2D 2 ++#define AMD_FMT_MOD_TILE_GFX12_64K_2D 3 ++#define AMD_FMT_MOD_TILE_GFX12_256K_2D 4 ++ + #define AMD_FMT_MOD_DCC_BLOCK_64B 0 + #define AMD_FMT_MOD_DCC_BLOCK_128B 1 + #define AMD_FMT_MOD_DCC_BLOCK_256B 2 +diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c +index ac37bd53aee03..913a6a7e62ca6 100644 +--- a/kernel/bpf/cgroup.c ++++ b/kernel/bpf/cgroup.c +@@ -1799,7 +1799,7 @@ static bool sockopt_buf_allocated(struct bpf_sockopt_kern *ctx, + } + + int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level, +- int *optname, char __user *optval, ++ int *optname, sockptr_t optval, + int *optlen, char **kernel_optval) + { + struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); +@@ -1822,7 +1822,8 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level, + + ctx.optlen = *optlen; + +- if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) { ++ if (copy_from_sockptr(ctx.optval, optval, ++ min(*optlen, max_optlen))) { + ret = -EFAULT; + goto out; + } +@@ -1889,8 +1890,8 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level, + } + + int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, +- int optname, char __user *optval, +- int __user *optlen, int max_optlen, ++ int optname, sockptr_t optval, ++ sockptr_t optlen, int max_optlen, + int retval) + { + struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); +@@ -1917,8 +1918,8 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, + * one that kernel returned as well to let + * BPF programs inspect the value. 
+ */
+-
+- if (get_user(ctx.optlen, optlen)) {
++ if (copy_from_sockptr(&ctx.optlen, optlen,
++ sizeof(ctx.optlen))) {
+ ret = -EFAULT;
+ goto out;
+ }
+@@ -1929,8 +1930,8 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
+ }
+ orig_optlen = ctx.optlen;
+
+- if (copy_from_user(ctx.optval, optval,
+- min(ctx.optlen, max_optlen)) != 0) {
++ if (copy_from_sockptr(ctx.optval, optval,
++ min(ctx.optlen, max_optlen))) {
+ ret = -EFAULT;
+ goto out;
+ }
+@@ -1944,7 +1945,8 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
+ if (ret < 0)
+ goto out;
+
+- if (optval && (ctx.optlen > max_optlen || ctx.optlen < 0)) {
++ if (!sockptr_is_null(optval) &&
++ (ctx.optlen > max_optlen || ctx.optlen < 0)) {
+ if (orig_optlen > PAGE_SIZE && ctx.optlen >= 0) {
+ pr_info_once("bpf getsockopt: ignoring program buffer with optlen=%d (max_optlen=%d)\n",
+ ctx.optlen, max_optlen);
+@@ -1956,11 +1958,12 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
+ }
+
+ if (ctx.optlen != 0) {
+- if (optval && copy_to_user(optval, ctx.optval, ctx.optlen)) {
++ if (!sockptr_is_null(optval) &&
++ copy_to_sockptr(optval, ctx.optval, ctx.optlen)) {
+ ret = -EFAULT;
+ goto out;
+ }
+- if (put_user(ctx.optlen, optlen)) {
++ if (copy_to_sockptr(optlen, &ctx.optlen, sizeof(ctx.optlen))) {
+ ret = -EFAULT;
+ goto out;
+ }
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 3f1a9cd7fc9ec..9d5699942273e 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -3064,8 +3064,10 @@ static int check_subprogs(struct bpf_verifier_env *env)
+
+ if (code == (BPF_JMP | BPF_CALL) &&
+ insn[i].src_reg == 0 &&
+- insn[i].imm == BPF_FUNC_tail_call)
++ insn[i].imm == BPF_FUNC_tail_call) {
+ subprog[cur_subprog].has_tail_call = true;
++ subprog[cur_subprog].tail_call_reachable = true;
++ }
+ if (BPF_CLASS(code) == BPF_LD &&
+ (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND))
+ subprog[cur_subprog].has_ld_abs = true;
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 5eca6281d1aa6..660817c125e73 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -1829,9 +1829,9 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
+ RCU_INIT_POINTER(scgrp->subsys[ssid], NULL);
+ rcu_assign_pointer(dcgrp->subsys[ssid], css);
+ ss->root = dst_root;
+- css->cgroup = dcgrp;
+
+ spin_lock_irq(&css_set_lock);
++ css->cgroup = dcgrp;
+ WARN_ON(!list_empty(&dcgrp->e_csets[ss->id]));
+ list_for_each_entry_safe(cset, cset_pos, &scgrp->e_csets[ss->id],
+ e_cset_node[ss->id]) {
+diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c
+index 4950e0b622b1f..cc19a3efea896 100644
+--- a/kernel/dma/map_benchmark.c
++++ b/kernel/dma/map_benchmark.c
+@@ -89,6 +89,22 @@ static int map_benchmark_thread(void *data)
+ atomic64_add(map_sq, &map->sum_sq_map);
+ atomic64_add(unmap_sq, &map->sum_sq_unmap);
+ atomic64_inc(&map->loops);
++
++ /*
++ * We may test for a long time so periodically check whether
++ * we need to schedule to avoid starving the others. Otherwise
++ * we may hang the kernel in a non-preemptible kernel when
++ * the number of test kthreads >= the number of CPUs, as the
++ * test kthreads will run endlessly on every CPU since the
++ * thread responsible for notifying the kthreads to stop (in
++ * do_map_benchmark()) could not be scheduled.
++ *
++ * Note this may degrade the test concurrency since the test
++ * threads may need to share the CPU time with other load
++ * in the system. 
So it's recommended to run this benchmark ++ * on an idle system. ++ */ ++ cond_resched(); + } + + out: +diff --git a/kernel/events/core.c b/kernel/events/core.c +index 0f2b5610933d7..4d0abdace4e7c 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -1255,8 +1255,9 @@ static void put_ctx(struct perf_event_context *ctx) + * perf_event_context::mutex + * perf_event::child_mutex; + * perf_event_context::lock +- * perf_event::mmap_mutex + * mmap_lock ++ * perf_event::mmap_mutex ++ * perf_buffer::aux_mutex + * perf_addr_filters_head::lock + * + * cpu_hotplug_lock +@@ -6352,12 +6353,11 @@ static void perf_mmap_close(struct vm_area_struct *vma) + event->pmu->event_unmapped(event, vma->vm_mm); + + /* +- * rb->aux_mmap_count will always drop before rb->mmap_count and +- * event->mmap_count, so it is ok to use event->mmap_mutex to +- * serialize with perf_mmap here. ++ * The AUX buffer is strictly a sub-buffer, serialize using aux_mutex ++ * to avoid complications. + */ + if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff && +- atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) { ++ atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &rb->aux_mutex)) { + /* + * Stop all AUX events that are writing to this buffer, + * so that we can free its AUX pages and corresponding PMU +@@ -6374,7 +6374,7 @@ static void perf_mmap_close(struct vm_area_struct *vma) + rb_free_aux(rb); + WARN_ON_ONCE(refcount_read(&rb->aux_refcount)); + +- mutex_unlock(&event->mmap_mutex); ++ mutex_unlock(&rb->aux_mutex); + } + + if (atomic_dec_and_test(&rb->mmap_count)) +@@ -6462,6 +6462,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) + struct perf_event *event = file->private_data; + unsigned long user_locked, user_lock_limit; + struct user_struct *user = current_user(); ++ struct mutex *aux_mutex = NULL; + struct perf_buffer *rb = NULL; + unsigned long locked, lock_limit; + unsigned long vma_size; +@@ -6510,6 +6511,9 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) + if (!rb) + goto aux_unlock; + ++ aux_mutex = &rb->aux_mutex; ++ mutex_lock(aux_mutex); ++ + aux_offset = READ_ONCE(rb->user_page->aux_offset); + aux_size = READ_ONCE(rb->user_page->aux_size); + +@@ -6660,6 +6664,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) + atomic_dec(&rb->mmap_count); + } + aux_unlock: ++ if (aux_mutex) ++ mutex_unlock(aux_mutex); + mutex_unlock(&event->mmap_mutex); + + /* +diff --git a/kernel/events/internal.h b/kernel/events/internal.h +index 386d21c7edfa0..f376b057320ce 100644 +--- a/kernel/events/internal.h ++++ b/kernel/events/internal.h +@@ -40,6 +40,7 @@ struct perf_buffer { + struct user_struct *mmap_user; + + /* AUX area */ ++ struct mutex aux_mutex; + long aux_head; + unsigned int aux_nest; + long aux_wakeup; /* last aux_watermark boundary crossed by aux_head */ +diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c +index f1f4a627f93db..b0930b4185527 100644 +--- a/kernel/events/ring_buffer.c ++++ b/kernel/events/ring_buffer.c +@@ -333,6 +333,8 @@ ring_buffer_init(struct perf_buffer *rb, long watermark, int flags) + */ + if (!rb->nr_pages) + rb->paused = 1; ++ ++ mutex_init(&rb->aux_mutex); + } + + void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags) +diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c +index 3048589e2e851..4705571f80345 100644 +--- a/kernel/events/uprobes.c ++++ b/kernel/events/uprobes.c +@@ -1480,7 +1480,7 @@ static struct xol_area *__create_xol_area(unsigned long vaddr) + 
uprobe_opcode_t insn = UPROBE_SWBP_INSN; + struct xol_area *area; + +- area = kmalloc(sizeof(*area), GFP_KERNEL); ++ area = kzalloc(sizeof(*area), GFP_KERNEL); + if (unlikely(!area)) + goto out; + +@@ -1490,7 +1490,6 @@ static struct xol_area *__create_xol_area(unsigned long vaddr) + goto free_area; + + area->xol_mapping.name = "[uprobes]"; +- area->xol_mapping.fault = NULL; + area->xol_mapping.pages = area->pages; + area->pages[0] = alloc_page(GFP_HIGHUSER); + if (!area->pages[0]) +diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c +index f9a419cd22d4c..830344627e9f2 100644 +--- a/kernel/kexec_file.c ++++ b/kernel/kexec_file.c +@@ -728,7 +728,7 @@ static int kexec_calculate_store_digests(struct kimage *image) + + #ifdef CONFIG_CRASH_HOTPLUG + /* Exclude elfcorehdr segment to allow future changes via hotplug */ +- if (j == image->elfcorehdr_index) ++ if (i == image->elfcorehdr_index) + continue; + #endif + +diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c +index 21db0df0eb000..bf3a28ee7d8f4 100644 +--- a/kernel/locking/rtmutex.c ++++ b/kernel/locking/rtmutex.c +@@ -1624,6 +1624,7 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock, + } + + static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock, ++ struct rt_mutex_base *lock, + struct rt_mutex_waiter *w) + { + /* +@@ -1636,10 +1637,10 @@ static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock, + if (build_ww_mutex() && w->ww_ctx) + return; + +- /* +- * Yell loudly and stop the task right here. +- */ ++ raw_spin_unlock_irq(&lock->wait_lock); ++ + WARN(1, "rtmutex deadlock detected\n"); ++ + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + schedule(); +@@ -1693,7 +1694,7 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock, + } else { + __set_current_state(TASK_RUNNING); + remove_waiter(lock, waiter); +- rt_mutex_handle_deadlock(ret, chwalk, waiter); ++ rt_mutex_handle_deadlock(ret, chwalk, lock, waiter); + } + + /* +diff --git a/kernel/resource.c b/kernel/resource.c +index e3f5680a564cf..ce127a829ead7 100644 +--- a/kernel/resource.c ++++ b/kernel/resource.c +@@ -1778,8 +1778,7 @@ static resource_size_t gfr_start(struct resource *base, resource_size_t size, + if (flags & GFR_DESCENDING) { + resource_size_t end; + +- end = min_t(resource_size_t, base->end, +- (1ULL << MAX_PHYSMEM_BITS) - 1); ++ end = min_t(resource_size_t, base->end, PHYSMEM_END); + return end - size + 1; + } + +@@ -1796,8 +1795,7 @@ static bool gfr_continue(struct resource *base, resource_size_t addr, + * @size did not wrap 0. + */ + return addr > addr - size && +- addr <= min_t(resource_size_t, base->end, +- (1ULL << MAX_PHYSMEM_BITS) - 1); ++ addr <= min_t(resource_size_t, base->end, PHYSMEM_END); + } + + static resource_size_t gfr_next(resource_size_t addr, resource_size_t size, +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index 97571d390f184..9b406d9886541 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -6679,8 +6679,9 @@ static void __sched notrace __schedule(unsigned int sched_mode) + * + * Here are the schemes providing that barrier on the + * various architectures: +- * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC. +- * switch_mm() rely on membarrier_arch_switch_mm() on PowerPC. ++ * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC, ++ * RISC-V. switch_mm() relies on membarrier_arch_switch_mm() ++ * on PowerPC and on RISC-V. 
+ * - finish_lock_switch() for weakly-ordered + * architectures where spin_unlock is a full barrier, + * - switch_to() for arm64 (weakly-ordered, spin_unlock +diff --git a/kernel/smp.c b/kernel/smp.c +index 695eb13a276d2..3eeffeaf5450c 100644 +--- a/kernel/smp.c ++++ b/kernel/smp.c +@@ -1119,6 +1119,7 @@ int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys) + + queue_work_on(cpu, system_wq, &sscs.work); + wait_for_completion(&sscs.done); ++ destroy_work_on_stack(&sscs.work); + + return sscs.ret; + } +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index fd398af792b4a..be878005e3449 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -4156,6 +4156,8 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu) + break; + entries++; + ring_buffer_iter_advance(buf_iter); ++ /* This could be a big loop */ ++ cond_resched(); + } + + per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries; +diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c +index a8e28f9b9271c..5b06f67879f5f 100644 +--- a/kernel/trace/trace_osnoise.c ++++ b/kernel/trace/trace_osnoise.c +@@ -252,6 +252,11 @@ static inline struct timerlat_variables *this_cpu_tmr_var(void) + return this_cpu_ptr(&per_cpu_timerlat_var); + } + ++/* ++ * Protect the interface. ++ */ ++static struct mutex interface_lock; ++ + /* + * tlat_var_reset - Reset the values of the given timerlat_variables + */ +@@ -259,14 +264,20 @@ static inline void tlat_var_reset(void) + { + struct timerlat_variables *tlat_var; + int cpu; ++ ++ /* Synchronize with the timerlat interfaces */ ++ mutex_lock(&interface_lock); + /* + * So far, all the values are initialized as 0, so + * zeroing the structure is perfect. + */ + for_each_cpu(cpu, cpu_online_mask) { + tlat_var = per_cpu_ptr(&per_cpu_timerlat_var, cpu); ++ if (tlat_var->kthread) ++ hrtimer_cancel(&tlat_var->timer); + memset(tlat_var, 0, sizeof(*tlat_var)); + } ++ mutex_unlock(&interface_lock); + } + #else /* CONFIG_TIMERLAT_TRACER */ + #define tlat_var_reset() do {} while (0) +@@ -331,11 +342,6 @@ struct timerlat_sample { + }; + #endif + +-/* +- * Protect the interface. +- */ +-static struct mutex interface_lock; +- + /* + * Tracer data. + */ +@@ -1612,6 +1618,7 @@ static int run_osnoise(void) + + static struct cpumask osnoise_cpumask; + static struct cpumask save_cpumask; ++static struct cpumask kthread_cpumask; + + /* + * osnoise_sleep - sleep until the next period +@@ -1675,6 +1682,7 @@ static inline int osnoise_migration_pending(void) + */ + mutex_lock(&interface_lock); + this_cpu_osn_var()->kthread = NULL; ++ cpumask_clear_cpu(smp_processor_id(), &kthread_cpumask); + mutex_unlock(&interface_lock); + + return 1; +@@ -1945,11 +1953,16 @@ static void stop_kthread(unsigned int cpu) + { + struct task_struct *kthread; + ++ mutex_lock(&interface_lock); + kthread = per_cpu(per_cpu_osnoise_var, cpu).kthread; + if (kthread) { +- if (test_bit(OSN_WORKLOAD, &osnoise_options)) { ++ per_cpu(per_cpu_osnoise_var, cpu).kthread = NULL; ++ mutex_unlock(&interface_lock); ++ ++ if (cpumask_test_and_clear_cpu(cpu, &kthread_cpumask) && ++ !WARN_ON(!test_bit(OSN_WORKLOAD, &osnoise_options))) { + kthread_stop(kthread); +- } else { ++ } else if (!WARN_ON(test_bit(OSN_WORKLOAD, &osnoise_options))) { + /* + * This is a user thread waiting on the timerlat_fd. 
We need + * to close all users, and the best way to guarantee this is +@@ -1958,8 +1971,8 @@ static void stop_kthread(unsigned int cpu) + kill_pid(kthread->thread_pid, SIGKILL, 1); + put_task_struct(kthread); + } +- per_cpu(per_cpu_osnoise_var, cpu).kthread = NULL; + } else { ++ mutex_unlock(&interface_lock); + /* if no workload, just return */ + if (!test_bit(OSN_WORKLOAD, &osnoise_options)) { + /* +@@ -1967,7 +1980,6 @@ static void stop_kthread(unsigned int cpu) + */ + per_cpu(per_cpu_osnoise_var, cpu).sampling = false; + barrier(); +- return; + } + } + } +@@ -1982,12 +1994,8 @@ static void stop_per_cpu_kthreads(void) + { + int cpu; + +- cpus_read_lock(); +- +- for_each_online_cpu(cpu) ++ for_each_possible_cpu(cpu) + stop_kthread(cpu); +- +- cpus_read_unlock(); + } + + /* +@@ -2021,6 +2029,7 @@ static int start_kthread(unsigned int cpu) + } + + per_cpu(per_cpu_osnoise_var, cpu).kthread = kthread; ++ cpumask_set_cpu(cpu, &kthread_cpumask); + + return 0; + } +@@ -2048,8 +2057,16 @@ static int start_per_cpu_kthreads(void) + */ + cpumask_and(current_mask, cpu_online_mask, &osnoise_cpumask); + +- for_each_possible_cpu(cpu) ++ for_each_possible_cpu(cpu) { ++ if (cpumask_test_and_clear_cpu(cpu, &kthread_cpumask)) { ++ struct task_struct *kthread; ++ ++ kthread = per_cpu(per_cpu_osnoise_var, cpu).kthread; ++ if (!WARN_ON(!kthread)) ++ kthread_stop(kthread); ++ } + per_cpu(per_cpu_osnoise_var, cpu).kthread = NULL; ++ } + + for_each_cpu(cpu, current_mask) { + retval = start_kthread(cpu); +@@ -2579,7 +2596,8 @@ static int timerlat_fd_release(struct inode *inode, struct file *file) + osn_var = per_cpu_ptr(&per_cpu_osnoise_var, cpu); + tlat_var = per_cpu_ptr(&per_cpu_timerlat_var, cpu); + +- hrtimer_cancel(&tlat_var->timer); ++ if (tlat_var->kthread) ++ hrtimer_cancel(&tlat_var->timer); + memset(tlat_var, 0, sizeof(*tlat_var)); + + osn_var->sampling = 0; +diff --git a/kernel/workqueue.c b/kernel/workqueue.c +index 8c7bafbee1b13..7fa1c7c9151ae 100644 +--- a/kernel/workqueue.c ++++ b/kernel/workqueue.c +@@ -6456,10 +6456,18 @@ static void wq_watchdog_timer_fn(struct timer_list *unused) + + notrace void wq_watchdog_touch(int cpu) + { ++ unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ; ++ unsigned long touch_ts = READ_ONCE(wq_watchdog_touched); ++ unsigned long now = jiffies; ++ + if (cpu >= 0) +- per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; ++ per_cpu(wq_watchdog_touched_cpu, cpu) = now; ++ else ++ WARN_ONCE(1, "%s should be called with valid CPU", __func__); + +- wq_watchdog_touched = jiffies; ++ /* Don't unnecessarily store to global cacheline */ ++ if (time_after(now, touch_ts + thresh / 4)) ++ WRITE_ONCE(wq_watchdog_touched, jiffies); + } + + static void wq_watchdog_set_thresh(unsigned long thresh) +diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c +index 7dfa88282b006..78f081d695d0b 100644 +--- a/lib/generic-radix-tree.c ++++ b/lib/generic-radix-tree.c +@@ -131,6 +131,8 @@ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset, + if ((v = cmpxchg_release(&radix->root, r, new_root)) == r) { + v = new_root; + new_node = NULL; ++ } else { ++ new_node->children[0] = NULL; + } + } + +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c +index f36525a595a93..9beed7c71a8e9 100644 +--- a/mm/memory_hotplug.c ++++ b/mm/memory_hotplug.c +@@ -1610,7 +1610,7 @@ struct range __weak arch_get_mappable_range(void) + + struct range mhp_get_pluggable_range(bool need_mapping) + { +- const u64 max_phys = (1ULL << MAX_PHYSMEM_BITS) - 1; ++ const u64 max_phys = PHYSMEM_END; + 
struct range mhp_range; + + if (need_mapping) { +diff --git a/mm/sparse.c b/mm/sparse.c +index 338cf946dee8d..0706113c4c843 100644 +--- a/mm/sparse.c ++++ b/mm/sparse.c +@@ -129,7 +129,7 @@ static inline int sparse_early_nid(struct mem_section *section) + static void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn, + unsigned long *end_pfn) + { +- unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT); ++ unsigned long max_sparsemem_pfn = (PHYSMEM_END + 1) >> PAGE_SHIFT; + + /* + * Sanity checks - do not allow an architecture to pass +diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c +index e76faba102797..92fe2a76f4b51 100644 +--- a/mm/userfaultfd.c ++++ b/mm/userfaultfd.c +@@ -699,27 +699,30 @@ static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm, + } + + dst_pmdval = pmdp_get_lockless(dst_pmd); +- /* +- * If the dst_pmd is mapped as THP don't +- * override it and just be strict. +- */ +- if (unlikely(pmd_trans_huge(dst_pmdval))) { +- err = -EEXIST; +- break; +- } + if (unlikely(pmd_none(dst_pmdval)) && + unlikely(__pte_alloc(dst_mm, dst_pmd))) { + err = -ENOMEM; + break; + } +- /* If an huge pmd materialized from under us fail */ +- if (unlikely(pmd_trans_huge(*dst_pmd))) { ++ dst_pmdval = pmdp_get_lockless(dst_pmd); ++ /* ++ * If the dst_pmd is THP don't override it and just be strict. ++ * (This includes the case where the PMD used to be THP and ++ * changed back to none after __pte_alloc().) ++ */ ++ if (unlikely(!pmd_present(dst_pmdval) || pmd_trans_huge(dst_pmdval) || ++ pmd_devmap(dst_pmdval))) { ++ err = -EEXIST; ++ break; ++ } ++ if (unlikely(pmd_bad(dst_pmdval))) { + err = -EFAULT; + break; + } +- +- BUG_ON(pmd_none(*dst_pmd)); +- BUG_ON(pmd_trans_huge(*dst_pmd)); ++ /* ++ * For shmem mappings, khugepaged is allowed to remove page ++ * tables under us; pte_offset_map_lock() will deal with that. ++ */ + + err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr, + src_addr, flags, &folio); +diff --git a/mm/vmalloc.c b/mm/vmalloc.c +index 732ff66d1b513..0148be0814af7 100644 +--- a/mm/vmalloc.c ++++ b/mm/vmalloc.c +@@ -2066,6 +2066,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) + vb->dirty_max = 0; + bitmap_set(vb->used_map, 0, (1UL << order)); + INIT_LIST_HEAD(&vb->free_list); ++ vb->cpu = raw_smp_processor_id(); + + xa = addr_to_vb_xa(va->va_start); + vb_idx = addr_to_vb_idx(va->va_start); +@@ -2082,7 +2083,6 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) + * integrity together with list_for_each_rcu from read + * side. + */ +- vb->cpu = raw_smp_processor_id(); + vbq = per_cpu_ptr(&vmap_block_queue, vb->cpu); + spin_lock(&vbq->lock); + list_add_tail_rcu(&vb->free_list, &vbq->free); +diff --git a/mm/vmscan.c b/mm/vmscan.c +index 83fa8e924f8ae..81533bed0b462 100644 +--- a/mm/vmscan.c ++++ b/mm/vmscan.c +@@ -2261,25 +2261,6 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec, + + } + +-#ifdef CONFIG_CMA +-/* +- * It is waste of effort to scan and reclaim CMA pages if it is not available +- * for current allocation context. 
Kswapd can not be enrolled as it can not +- * distinguish this scenario by using sc->gfp_mask = GFP_KERNEL +- */ +-static bool skip_cma(struct folio *folio, struct scan_control *sc) +-{ +- return !current_is_kswapd() && +- gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE && +- get_pageblock_migratetype(&folio->page) == MIGRATE_CMA; +-} +-#else +-static bool skip_cma(struct folio *folio, struct scan_control *sc) +-{ +- return false; +-} +-#endif +- + /* + * Isolating page from the lruvec to fill in @dst list by nr_to_scan times. + * +@@ -2326,8 +2307,7 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan, + nr_pages = folio_nr_pages(folio); + total_scan += nr_pages; + +- if (folio_zonenum(folio) > sc->reclaim_idx || +- skip_cma(folio, sc)) { ++ if (folio_zonenum(folio) > sc->reclaim_idx) { + nr_skipped[folio_zonenum(folio)] += nr_pages; + move_to = &folios_skipped; + goto move; +@@ -4971,7 +4951,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c + } + + /* ineligible */ +- if (zone > sc->reclaim_idx || skip_cma(folio, sc)) { ++ if (zone > sc->reclaim_idx) { + gen = folio_inc_gen(lruvec, folio, false); + list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); + return true; +diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c +index dc1c07c7d4ff9..d8a01eb016ad0 100644 +--- a/net/bluetooth/hci_conn.c ++++ b/net/bluetooth/hci_conn.c +@@ -68,7 +68,7 @@ static const struct sco_param esco_param_msbc[] = { + }; + + /* This function requires the caller holds hdev->lock */ +-static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status) ++void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status) + { + struct hci_conn_params *params; + struct hci_dev *hdev = conn->hdev; +@@ -178,64 +178,6 @@ static void hci_conn_cleanup(struct hci_conn *conn) + hci_dev_put(hdev); + } + +-static void hci_acl_create_connection(struct hci_conn *conn) +-{ +- struct hci_dev *hdev = conn->hdev; +- struct inquiry_entry *ie; +- struct hci_cp_create_conn cp; +- +- BT_DBG("hcon %p", conn); +- +- /* Many controllers disallow HCI Create Connection while it is doing +- * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create +- * Connection. This may cause the MGMT discovering state to become false +- * without user space's request but it is okay since the MGMT Discovery +- * APIs do not promise that discovery should be done forever. Instead, +- * the user space monitors the status of MGMT discovering and it may +- * request for discovery again when this flag becomes false. +- */ +- if (test_bit(HCI_INQUIRY, &hdev->flags)) { +- /* Put this connection to "pending" state so that it will be +- * executed after the inquiry cancel command complete event. 
+- */ +- conn->state = BT_CONNECT2; +- hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL); +- return; +- } +- +- conn->state = BT_CONNECT; +- conn->out = true; +- conn->role = HCI_ROLE_MASTER; +- +- conn->attempt++; +- +- conn->link_policy = hdev->link_policy; +- +- memset(&cp, 0, sizeof(cp)); +- bacpy(&cp.bdaddr, &conn->dst); +- cp.pscan_rep_mode = 0x02; +- +- ie = hci_inquiry_cache_lookup(hdev, &conn->dst); +- if (ie) { +- if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) { +- cp.pscan_rep_mode = ie->data.pscan_rep_mode; +- cp.pscan_mode = ie->data.pscan_mode; +- cp.clock_offset = ie->data.clock_offset | +- cpu_to_le16(0x8000); +- } +- +- memcpy(conn->dev_class, ie->data.dev_class, 3); +- } +- +- cp.pkt_type = cpu_to_le16(conn->pkt_type); +- if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER)) +- cp.role_switch = 0x01; +- else +- cp.role_switch = 0x00; +- +- hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp); +-} +- + int hci_disconnect(struct hci_conn *conn, __u8 reason) + { + BT_DBG("hcon %p", conn); +@@ -1201,6 +1143,9 @@ void hci_conn_del(struct hci_conn *conn) + * rest of hci_conn_del. + */ + hci_conn_cleanup(conn); ++ ++ /* Dequeue callbacks using connection pointer as data */ ++ hci_cmd_sync_dequeue(hdev, NULL, conn, NULL); + } + + struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type) +@@ -1334,53 +1279,6 @@ u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle) + return 0; + } + +-static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err) +-{ +- struct hci_conn *conn; +- u16 handle = PTR_UINT(data); +- +- conn = hci_conn_hash_lookup_handle(hdev, handle); +- if (!conn) +- return; +- +- bt_dev_dbg(hdev, "err %d", err); +- +- hci_dev_lock(hdev); +- +- if (!err) { +- hci_connect_le_scan_cleanup(conn, 0x00); +- goto done; +- } +- +- /* Check if connection is still pending */ +- if (conn != hci_lookup_le_connect(hdev)) +- goto done; +- +- /* Flush to make sure we send create conn cancel command if needed */ +- flush_delayed_work(&conn->le_conn_timeout); +- hci_conn_failed(conn, bt_status(err)); +- +-done: +- hci_dev_unlock(hdev); +-} +- +-static int hci_connect_le_sync(struct hci_dev *hdev, void *data) +-{ +- struct hci_conn *conn; +- u16 handle = PTR_UINT(data); +- +- conn = hci_conn_hash_lookup_handle(hdev, handle); +- if (!conn) +- return 0; +- +- bt_dev_dbg(hdev, "conn %p", conn); +- +- clear_bit(HCI_CONN_SCANNING, &conn->flags); +- conn->state = BT_CONNECT; +- +- return hci_le_create_conn_sync(hdev, conn); +-} +- + struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, + u8 dst_type, bool dst_resolved, u8 sec_level, + u16 conn_timeout, u8 role) +@@ -1447,9 +1345,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, + conn->sec_level = BT_SECURITY_LOW; + conn->conn_timeout = conn_timeout; + +- err = hci_cmd_sync_queue(hdev, hci_connect_le_sync, +- UINT_PTR(conn->handle), +- create_le_conn_complete); ++ err = hci_connect_le_sync(hdev, conn); + if (err) { + hci_conn_del(conn); + return ERR_PTR(err); +@@ -1702,10 +1598,17 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, + + acl->conn_reason = conn_reason; + if (acl->state == BT_OPEN || acl->state == BT_CLOSED) { ++ int err; ++ + acl->sec_level = BT_SECURITY_LOW; + acl->pending_sec_level = sec_level; + acl->auth_type = auth_type; +- hci_acl_create_connection(acl); ++ ++ err = hci_connect_acl_sync(hdev, acl); ++ if (err) { ++ hci_conn_del(acl); ++ return ERR_PTR(err); ++ } + } + + return acl; +@@ -2616,22 
+2519,6 @@ void hci_conn_hash_flush(struct hci_dev *hdev)
+ }
+ }
+
+-/* Check pending connect attempts */
+-void hci_conn_check_pending(struct hci_dev *hdev)
+-{
+- struct hci_conn *conn;
+-
+- BT_DBG("hdev %s", hdev->name);
+-
+- hci_dev_lock(hdev);
+-
+- conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
+- if (conn)
+- hci_acl_create_connection(conn);
+-
+- hci_dev_unlock(hdev);
+-}
+-
+ static u32 get_link_mode(struct hci_conn *conn)
+ {
+ u32 link_mode = 0;
+@@ -2947,12 +2834,10 @@ u32 hci_conn_get_phy(struct hci_conn *conn)
+
+ static int abort_conn_sync(struct hci_dev *hdev, void *data)
+ {
+- struct hci_conn *conn;
+- u16 handle = PTR_UINT(data);
++ struct hci_conn *conn = data;
+
+- conn = hci_conn_hash_lookup_handle(hdev, handle);
+- if (!conn)
+- return 0;
++ if (!hci_conn_valid(hdev, conn))
++ return -ECANCELED;
+
+ return hci_abort_conn_sync(hdev, conn, conn->abort_reason);
+ }
+@@ -2980,14 +2865,21 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason)
+ */
+ if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
+ switch (hci_skb_event(hdev->sent_cmd)) {
++ case HCI_EV_CONN_COMPLETE:
+ case HCI_EV_LE_CONN_COMPLETE:
+ case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
+ case HCI_EVT_LE_CIS_ESTABLISHED:
+ hci_cmd_sync_cancel(hdev, ECANCELED);
+ break;
+ }
++ /* Cancel connect attempt if still queued/pending */
++ } else if (!hci_cancel_connect_sync(hdev, conn)) {
++ return 0;
+ }
+
+- return hci_cmd_sync_queue(hdev, abort_conn_sync, UINT_PTR(conn->handle),
+- NULL);
++ /* Run immediately if on cmd_sync_work since this may be called
++ * as a result of MGMT_OP_DISCONNECT/MGMT_OP_UNPAIR, which
++ * already queues its callback on cmd_sync_work.
++ */
++ return hci_cmd_sync_run_once(hdev, abort_conn_sync, conn, NULL);
+}
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 727f040b65297..d81c7fccdd404 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -93,11 +93,11 @@ static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
+ /* It is possible that we receive Inquiry Complete event right
+ * before we receive Inquiry Cancel Command Complete event, in
+ * which case the latter event should have status of Command
+- * Disallowed (0x0c). This should not be treated as error, since
++ * Disallowed. This should not be treated as an error, since
+ * we actually achieve what Inquiry Cancel wants to achieve,
+ * which is to end the last Inquiry session. 
+ */ +- if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) { ++ if (rp->status == HCI_ERROR_COMMAND_DISALLOWED && !test_bit(HCI_INQUIRY, &hdev->flags)) { + bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command"); + rp->status = 0x00; + } +@@ -118,8 +118,6 @@ static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data, + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + hci_dev_unlock(hdev); + +- hci_conn_check_pending(hdev); +- + return rp->status; + } + +@@ -150,8 +148,6 @@ static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data, + + hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); + +- hci_conn_check_pending(hdev); +- + return rp->status; + } + +@@ -2257,10 +2253,8 @@ static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) + { + bt_dev_dbg(hdev, "status 0x%2.2x", status); + +- if (status) { +- hci_conn_check_pending(hdev); ++ if (status) + return; +- } + + if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY)) + set_bit(HCI_INQUIRY, &hdev->flags); +@@ -2285,12 +2279,9 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) + + if (status) { + if (conn && conn->state == BT_CONNECT) { +- if (status != 0x0c || conn->attempt > 2) { +- conn->state = BT_CLOSED; +- hci_connect_cfm(conn, status); +- hci_conn_del(conn); +- } else +- conn->state = BT_CONNECT2; ++ conn->state = BT_CLOSED; ++ hci_connect_cfm(conn, status); ++ hci_conn_del(conn); + } + } else { + if (!conn) { +@@ -2980,8 +2971,6 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data, + + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + +- hci_conn_check_pending(hdev); +- + if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) + return; + +@@ -3228,8 +3217,6 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data, + + unlock: + hci_dev_unlock(hdev); +- +- hci_conn_check_pending(hdev); + } + + static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr) +@@ -6430,7 +6417,7 @@ static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data, + * transition into connected state and mark it as + * successful. 
+ */
+- if (!conn->out && ev->status == 0x1a &&
++ if (!conn->out && ev->status == HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE &&
+ (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
+ status = 0x00;
+ else
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 38fee34887d8a..af7817a7c585b 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -114,7 +114,7 @@ static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen,
+ skb_queue_tail(&req->cmd_q, skb);
+ }
+
+-static int hci_cmd_sync_run(struct hci_request *req)
++static int hci_req_sync_run(struct hci_request *req)
+ {
+ struct hci_dev *hdev = req->hdev;
+ struct sk_buff *skb;
+@@ -164,7 +164,7 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+
+ hdev->req_status = HCI_REQ_PEND;
+
+- err = hci_cmd_sync_run(&req);
++ err = hci_req_sync_run(&req);
+ if (err < 0)
+ return ERR_PTR(err);
+
+@@ -651,6 +651,17 @@ void hci_cmd_sync_init(struct hci_dev *hdev)
+ INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
+ }
+
++static void _hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
++ struct hci_cmd_sync_work_entry *entry,
++ int err)
++{
++ if (entry->destroy)
++ entry->destroy(hdev, entry->data, err);
++
++ list_del(&entry->list);
++ kfree(entry);
++}
++
+ void hci_cmd_sync_clear(struct hci_dev *hdev)
+ {
+ struct hci_cmd_sync_work_entry *entry, *tmp;
+@@ -659,13 +670,8 @@ void hci_cmd_sync_clear(struct hci_dev *hdev)
+ cancel_work_sync(&hdev->reenable_adv_work);
+
+ mutex_lock(&hdev->cmd_sync_work_lock);
+- list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
+- if (entry->destroy)
+- entry->destroy(hdev, entry->data, -ECANCELED);
+-
+- list_del(&entry->list);
+- kfree(entry);
+- }
++ list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list)
++ _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
+ mutex_unlock(&hdev->cmd_sync_work_lock);
+ }
+
+@@ -757,6 +763,153 @@ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ }
+ EXPORT_SYMBOL(hci_cmd_sync_queue);
+
++static struct hci_cmd_sync_work_entry *
++_hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
++ void *data, hci_cmd_sync_work_destroy_t destroy)
++{
++ struct hci_cmd_sync_work_entry *entry, *tmp;
++
++ list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
++ if (func && entry->func != func)
++ continue;
++
++ if (data && entry->data != data)
++ continue;
++
++ if (destroy && entry->destroy != destroy)
++ continue;
++
++ return entry;
++ }
++
++ return NULL;
++}
++
++/* Queue HCI command entry once:
++ *
++ * - Look up whether an entry already exists, and only if it doesn't create a
++ * new entry and queue it.
++ */
++int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
++ void *data, hci_cmd_sync_work_destroy_t destroy)
++{
++ if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
++ return 0;
++
++ return hci_cmd_sync_queue(hdev, func, data, destroy);
++}
++EXPORT_SYMBOL(hci_cmd_sync_queue_once);
++
++/* Run HCI command:
++ *
++ * - hdev must be running
++ * - if on cmd_sync_work then run immediately, otherwise queue
++ */
++int hci_cmd_sync_run(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
++ void *data, hci_cmd_sync_work_destroy_t destroy)
++{
++ /* Only queue the command if hdev is running, which means it has been
++ * opened and is either in the init phase or is already up.
++ */
++ if (!test_bit(HCI_RUNNING, &hdev->flags))
++ return -ENETDOWN;
++
++ /* If on cmd_sync_work then run immediately, otherwise queue */
++ if (current_work() == &hdev->cmd_sync_work)
++ return func(hdev, data);
++
++ return hci_cmd_sync_submit(hdev, func, data, destroy);
++}
++EXPORT_SYMBOL(hci_cmd_sync_run);
++
++/* Run HCI command entry once:
++ *
++ * - Look up whether an entry already exists, and only if it doesn't create a
++ * new entry and run it.
++ * - if on cmd_sync_work then run immediately, otherwise queue
++ */
++int hci_cmd_sync_run_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
++ void *data, hci_cmd_sync_work_destroy_t destroy)
++{
++ if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
++ return 0;
++
++ return hci_cmd_sync_run(hdev, func, data, destroy);
++}
++EXPORT_SYMBOL(hci_cmd_sync_run_once);
++
++/* Lookup HCI command entry:
++ *
++ * - Return the first entry that matches by function callback or data or
++ * destroy callback.
++ */
++struct hci_cmd_sync_work_entry *
++hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
++ void *data, hci_cmd_sync_work_destroy_t destroy)
++{
++ struct hci_cmd_sync_work_entry *entry;
++
++ mutex_lock(&hdev->cmd_sync_work_lock);
++ entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
++ mutex_unlock(&hdev->cmd_sync_work_lock);
++
++ return entry;
++}
++EXPORT_SYMBOL(hci_cmd_sync_lookup_entry);
++
++/* Cancel HCI command entry */
++void hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
++ struct hci_cmd_sync_work_entry *entry)
++{
++ mutex_lock(&hdev->cmd_sync_work_lock);
++ _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
++ mutex_unlock(&hdev->cmd_sync_work_lock);
++}
++EXPORT_SYMBOL(hci_cmd_sync_cancel_entry);
++
++/* Dequeue one HCI command entry:
++ *
++ * - Look up and cancel the first entry that matches.
++ */
++bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
++ hci_cmd_sync_work_func_t func,
++ void *data, hci_cmd_sync_work_destroy_t destroy)
++{
++ struct hci_cmd_sync_work_entry *entry;
++
++ entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
++ if (!entry)
++ return false;
++
++ hci_cmd_sync_cancel_entry(hdev, entry);
++
++ return true;
++}
++EXPORT_SYMBOL(hci_cmd_sync_dequeue_once);
++
++/* Dequeue HCI command entry:
++ *
++ * - Look up and cancel any entry that matches by function callback or data or
++ * destroy callback. 
++ */ ++bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, ++ void *data, hci_cmd_sync_work_destroy_t destroy) ++{ ++ struct hci_cmd_sync_work_entry *entry; ++ bool ret = false; ++ ++ mutex_lock(&hdev->cmd_sync_work_lock); ++ while ((entry = _hci_cmd_sync_lookup_entry(hdev, func, data, ++ destroy))) { ++ _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED); ++ ret = true; ++ } ++ mutex_unlock(&hdev->cmd_sync_work_lock); ++ ++ return ret; ++} ++EXPORT_SYMBOL(hci_cmd_sync_dequeue); ++ + int hci_update_eir_sync(struct hci_dev *hdev) + { + struct hci_cp_write_eir cp; +@@ -3048,7 +3201,8 @@ int hci_update_passive_scan(struct hci_dev *hdev) + hci_dev_test_flag(hdev, HCI_UNREGISTER)) + return 0; + +- return hci_cmd_sync_queue(hdev, update_passive_scan_sync, NULL, NULL); ++ return hci_cmd_sync_queue_once(hdev, update_passive_scan_sync, NULL, ++ NULL); + } + + int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val) +@@ -6254,12 +6408,21 @@ static int hci_le_ext_create_conn_sync(struct hci_dev *hdev, + conn->conn_timeout, NULL); + } + +-int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn) ++static int hci_le_create_conn_sync(struct hci_dev *hdev, void *data) + { + struct hci_cp_le_create_conn cp; + struct hci_conn_params *params; + u8 own_addr_type; + int err; ++ struct hci_conn *conn = data; ++ ++ if (!hci_conn_valid(hdev, conn)) ++ return -ECANCELED; ++ ++ bt_dev_dbg(hdev, "conn %p", conn); ++ ++ clear_bit(HCI_CONN_SCANNING, &conn->flags); ++ conn->state = BT_CONNECT; + + /* If requested to connect as peripheral use directed advertising */ + if (conn->role == HCI_ROLE_SLAVE) { +@@ -6577,3 +6740,125 @@ int hci_update_adv_data(struct hci_dev *hdev, u8 instance) + return hci_cmd_sync_queue(hdev, _update_adv_data_sync, + UINT_PTR(instance), NULL); + } ++ ++static int hci_acl_create_conn_sync(struct hci_dev *hdev, void *data) ++{ ++ struct hci_conn *conn = data; ++ struct inquiry_entry *ie; ++ struct hci_cp_create_conn cp; ++ int err; ++ ++ if (!hci_conn_valid(hdev, conn)) ++ return -ECANCELED; ++ ++ /* Many controllers disallow HCI Create Connection while it is doing ++ * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create ++ * Connection. This may cause the MGMT discovering state to become false ++ * without user space's request but it is okay since the MGMT Discovery ++ * APIs do not promise that discovery should be done forever. Instead, ++ * the user space monitors the status of MGMT discovering and it may ++ * request for discovery again when this flag becomes false. 
++ */ ++ if (test_bit(HCI_INQUIRY, &hdev->flags)) { ++ err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, 0, ++ NULL, HCI_CMD_TIMEOUT); ++ if (err) ++ bt_dev_warn(hdev, "Failed to cancel inquiry %d", err); ++ } ++ ++ conn->state = BT_CONNECT; ++ conn->out = true; ++ conn->role = HCI_ROLE_MASTER; ++ ++ conn->attempt++; ++ ++ conn->link_policy = hdev->link_policy; ++ ++ memset(&cp, 0, sizeof(cp)); ++ bacpy(&cp.bdaddr, &conn->dst); ++ cp.pscan_rep_mode = 0x02; ++ ++ ie = hci_inquiry_cache_lookup(hdev, &conn->dst); ++ if (ie) { ++ if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) { ++ cp.pscan_rep_mode = ie->data.pscan_rep_mode; ++ cp.pscan_mode = ie->data.pscan_mode; ++ cp.clock_offset = ie->data.clock_offset | ++ cpu_to_le16(0x8000); ++ } ++ ++ memcpy(conn->dev_class, ie->data.dev_class, 3); ++ } ++ ++ cp.pkt_type = cpu_to_le16(conn->pkt_type); ++ if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER)) ++ cp.role_switch = 0x01; ++ else ++ cp.role_switch = 0x00; ++ ++ return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN, ++ sizeof(cp), &cp, ++ HCI_EV_CONN_COMPLETE, ++ conn->conn_timeout, NULL); ++} ++ ++int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn) ++{ ++ return hci_cmd_sync_queue_once(hdev, hci_acl_create_conn_sync, conn, ++ NULL); ++} ++ ++static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err) ++{ ++ struct hci_conn *conn = data; ++ ++ bt_dev_dbg(hdev, "err %d", err); ++ ++ if (err == -ECANCELED) ++ return; ++ ++ hci_dev_lock(hdev); ++ ++ if (!hci_conn_valid(hdev, conn)) ++ goto done; ++ ++ if (!err) { ++ hci_connect_le_scan_cleanup(conn, 0x00); ++ goto done; ++ } ++ ++ /* Check if connection is still pending */ ++ if (conn != hci_lookup_le_connect(hdev)) ++ goto done; ++ ++ /* Flush to make sure we send create conn cancel command if needed */ ++ flush_delayed_work(&conn->le_conn_timeout); ++ hci_conn_failed(conn, bt_status(err)); ++ ++done: ++ hci_dev_unlock(hdev); ++} ++ ++int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn) ++{ ++ return hci_cmd_sync_queue_once(hdev, hci_le_create_conn_sync, conn, ++ create_le_conn_complete); ++} ++ ++int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn) ++{ ++ if (conn->state != BT_OPEN) ++ return -EINVAL; ++ ++ switch (conn->type) { ++ case ACL_LINK: ++ return !hci_cmd_sync_dequeue_once(hdev, ++ hci_acl_create_conn_sync, ++ conn, NULL); ++ case LE_LINK: ++ return !hci_cmd_sync_dequeue_once(hdev, hci_le_create_conn_sync, ++ conn, create_le_conn_complete); ++ } ++ ++ return -ENOENT; ++} +diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c +index 3533ac679e42c..4ae9029b5785f 100644 +--- a/net/bluetooth/mgmt.c ++++ b/net/bluetooth/mgmt.c +@@ -2824,16 +2824,6 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data, + bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys, + key_count); + +- for (i = 0; i < key_count; i++) { +- struct mgmt_link_key_info *key = &cp->keys[i]; +- +- /* Considering SMP over BREDR/LE, there is no need to check addr_type */ +- if (key->type > 0x08) +- return mgmt_cmd_status(sk, hdev->id, +- MGMT_OP_LOAD_LINK_KEYS, +- MGMT_STATUS_INVALID_PARAMS); +- } +- + hci_dev_lock(hdev); + + hci_link_keys_clear(hdev); +@@ -2858,6 +2848,19 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data, + continue; + } + ++ if (key->addr.type != BDADDR_BREDR) { ++ bt_dev_warn(hdev, ++ "Invalid link address type %u for %pMR", ++ key->addr.type, &key->addr.bdaddr); ++ continue; ++ } ++ ++ if 
(key->type > 0x08) { ++ bt_dev_warn(hdev, "Invalid link key type %u for %pMR", ++ key->type, &key->addr.bdaddr); ++ continue; ++ } ++ + /* Always ignore debug keys and require a new pairing if + * the user wants to use them. + */ +@@ -2915,7 +2918,12 @@ static int unpair_device_sync(struct hci_dev *hdev, void *data) + if (!conn) + return 0; + +- return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM); ++ /* Disregard any possible error since the likes of hci_abort_conn_sync ++ * will clean up the connection no matter the error. ++ */ ++ hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM); ++ ++ return 0; + } + + static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, +@@ -3047,13 +3055,44 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, + return err; + } + ++static void disconnect_complete(struct hci_dev *hdev, void *data, int err) ++{ ++ struct mgmt_pending_cmd *cmd = data; ++ ++ cmd->cmd_complete(cmd, mgmt_status(err)); ++ mgmt_pending_free(cmd); ++} ++ ++static int disconnect_sync(struct hci_dev *hdev, void *data) ++{ ++ struct mgmt_pending_cmd *cmd = data; ++ struct mgmt_cp_disconnect *cp = cmd->param; ++ struct hci_conn *conn; ++ ++ if (cp->addr.type == BDADDR_BREDR) ++ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, ++ &cp->addr.bdaddr); ++ else ++ conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, ++ le_addr_type(cp->addr.type)); ++ ++ if (!conn) ++ return -ENOTCONN; ++ ++ /* Disregard any possible error since the likes of hci_abort_conn_sync ++ * will clean up the connection no matter the error. ++ */ ++ hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM); ++ ++ return 0; ++} ++ + static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) + { + struct mgmt_cp_disconnect *cp = data; + struct mgmt_rp_disconnect rp; + struct mgmt_pending_cmd *cmd; +- struct hci_conn *conn; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); +@@ -3076,27 +3115,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data, + goto failed; + } + +- if (pending_find(MGMT_OP_DISCONNECT, hdev)) { +- err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, +- MGMT_STATUS_BUSY, &rp, sizeof(rp)); +- goto failed; +- } +- +- if (cp->addr.type == BDADDR_BREDR) +- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, +- &cp->addr.bdaddr); +- else +- conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, +- le_addr_type(cp->addr.type)); +- +- if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) { +- err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, +- MGMT_STATUS_NOT_CONNECTED, &rp, +- sizeof(rp)); +- goto failed; +- } +- +- cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len); ++ cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len); + if (!cmd) { + err = -ENOMEM; + goto failed; +@@ -3104,9 +3123,10 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data, + + cmd->cmd_complete = generic_cmd_complete; + +- err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM); ++ err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd, ++ disconnect_complete); + if (err < 0) +- mgmt_pending_remove(cmd); ++ mgmt_pending_free(cmd); + + failed: + hci_dev_unlock(hdev); +@@ -7065,7 +7085,6 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data, + + for (i = 0; i < irk_count; i++) { + struct mgmt_irk_info *irk = &cp->irks[i]; +- u8 addr_type = le_addr_type(irk->addr.type); + + if (hci_is_blocked_key(hdev, + HCI_BLOCKED_KEY_TYPE_IRK, +@@ -7075,12 +7094,8 @@ static int 
load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data, + continue; + } + +- /* When using SMP over BR/EDR, the addr type should be set to BREDR */ +- if (irk->addr.type == BDADDR_BREDR) +- addr_type = BDADDR_BREDR; +- + hci_add_irk(hdev, &irk->addr.bdaddr, +- addr_type, irk->val, ++ le_addr_type(irk->addr.type), irk->val, + BDADDR_ANY); + } + +@@ -7145,15 +7160,6 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, + + bt_dev_dbg(hdev, "key_count %u", key_count); + +- for (i = 0; i < key_count; i++) { +- struct mgmt_ltk_info *key = &cp->keys[i]; +- +- if (!ltk_is_valid(key)) +- return mgmt_cmd_status(sk, hdev->id, +- MGMT_OP_LOAD_LONG_TERM_KEYS, +- MGMT_STATUS_INVALID_PARAMS); +- } +- + hci_dev_lock(hdev); + + hci_smp_ltks_clear(hdev); +@@ -7161,7 +7167,6 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, + for (i = 0; i < key_count; i++) { + struct mgmt_ltk_info *key = &cp->keys[i]; + u8 type, authenticated; +- u8 addr_type = le_addr_type(key->addr.type); + + if (hci_is_blocked_key(hdev, + HCI_BLOCKED_KEY_TYPE_LTK, +@@ -7171,6 +7176,12 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, + continue; + } + ++ if (!ltk_is_valid(key)) { ++ bt_dev_warn(hdev, "Invalid LTK for %pMR", ++ &key->addr.bdaddr); ++ continue; ++ } ++ + switch (key->type) { + case MGMT_LTK_UNAUTHENTICATED: + authenticated = 0x00; +@@ -7196,12 +7207,8 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, + continue; + } + +- /* When using SMP over BR/EDR, the addr type should be set to BREDR */ +- if (key->addr.type == BDADDR_BREDR) +- addr_type = BDADDR_BREDR; +- + hci_add_ltk(hdev, &key->addr.bdaddr, +- addr_type, type, authenticated, ++ le_addr_type(key->addr.type), type, authenticated, + key->val, key->enc_size, key->ediv, key->rand); + } + +@@ -9450,7 +9457,7 @@ void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, + + ev.store_hint = persistent; + bacpy(&ev.key.addr.bdaddr, &key->bdaddr); +- ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type); ++ ev.key.addr.type = BDADDR_BREDR; + ev.key.type = key->type; + memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE); + ev.key.pin_len = key->pin_len; +@@ -9501,7 +9508,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent) + ev.store_hint = persistent; + + bacpy(&ev.key.addr.bdaddr, &key->bdaddr); +- ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type); ++ ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type); + ev.key.type = mgmt_ltk_type(key); + ev.key.enc_size = key->enc_size; + ev.key.ediv = key->ediv; +@@ -9530,7 +9537,7 @@ void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent) + + bacpy(&ev.rpa, &irk->rpa); + bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr); +- ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type); ++ ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type); + memcpy(ev.irk.val, irk->val, sizeof(irk->val)); + + mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL); +@@ -9559,7 +9566,7 @@ void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk, + ev.store_hint = persistent; + + bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr); +- ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type); ++ ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type); + ev.key.type = csrk->type; + memcpy(ev.key.val, csrk->val, sizeof(csrk->val)); + +@@ -9637,18 +9644,6 @@ void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, + 
mgmt_event_skb(skb, NULL); + } + +-static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data) +-{ +- struct sock **sk = data; +- +- cmd->cmd_complete(cmd, 0); +- +- *sk = cmd->sk; +- sock_hold(*sk); +- +- mgmt_pending_remove(cmd); +-} +- + static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data) + { + struct hci_dev *hdev = data; +@@ -9689,8 +9684,6 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, + if (link_type != ACL_LINK && link_type != LE_LINK) + return; + +- mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk); +- + bacpy(&ev.addr.bdaddr, bdaddr); + ev.addr.type = link_to_bdaddr(link_type, addr_type); + ev.reason = reason; +@@ -9703,9 +9696,6 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, + + if (sk) + sock_put(sk); +- +- mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp, +- hdev); + } + + void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, +diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c +index fa3986cfd5266..56f7f041c9a60 100644 +--- a/net/bluetooth/smp.c ++++ b/net/bluetooth/smp.c +@@ -1061,7 +1061,6 @@ static void smp_notify_keys(struct l2cap_conn *conn) + } + + if (smp->remote_irk) { +- smp->remote_irk->link_type = hcon->type; + mgmt_new_irk(hdev, smp->remote_irk, persistent); + + /* Now that user space can be considered to know the +@@ -1081,28 +1080,24 @@ static void smp_notify_keys(struct l2cap_conn *conn) + } + + if (smp->csrk) { +- smp->csrk->link_type = hcon->type; + smp->csrk->bdaddr_type = hcon->dst_type; + bacpy(&smp->csrk->bdaddr, &hcon->dst); + mgmt_new_csrk(hdev, smp->csrk, persistent); + } + + if (smp->responder_csrk) { +- smp->responder_csrk->link_type = hcon->type; + smp->responder_csrk->bdaddr_type = hcon->dst_type; + bacpy(&smp->responder_csrk->bdaddr, &hcon->dst); + mgmt_new_csrk(hdev, smp->responder_csrk, persistent); + } + + if (smp->ltk) { +- smp->ltk->link_type = hcon->type; + smp->ltk->bdaddr_type = hcon->dst_type; + bacpy(&smp->ltk->bdaddr, &hcon->dst); + mgmt_new_ltk(hdev, smp->ltk, persistent); + } + + if (smp->responder_ltk) { +- smp->responder_ltk->link_type = hcon->type; + smp->responder_ltk->bdaddr_type = hcon->dst_type; + bacpy(&smp->responder_ltk->bdaddr, &hcon->dst); + mgmt_new_ltk(hdev, smp->responder_ltk, persistent); +@@ -1122,8 +1117,6 @@ static void smp_notify_keys(struct l2cap_conn *conn) + key = hci_add_link_key(hdev, smp->conn->hcon, &hcon->dst, + smp->link_key, type, 0, &persistent); + if (key) { +- key->link_type = hcon->type; +- key->bdaddr_type = hcon->dst_type; + mgmt_new_link_key(hdev, key, persistent); + + /* Don't keep debug keys around if the relevant +diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c +index e69a872bfc1d7..a6d8cd9a58078 100644 +--- a/net/bridge/br_fdb.c ++++ b/net/bridge/br_fdb.c +@@ -1425,12 +1425,10 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p, + modified = true; + } + +- if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) { ++ if (test_and_set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) { + /* Refresh entry */ + fdb->used = jiffies; +- } else if (!test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags)) { +- /* Take over SW learned entry */ +- set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags); ++ } else { + modified = true; + } + +diff --git a/net/can/bcm.c b/net/can/bcm.c +index 9168114fc87f7..00208ee13e578 100644 +--- a/net/can/bcm.c ++++ b/net/can/bcm.c +@@ -1428,6 +1428,10 @@ static void bcm_notify(struct bcm_sock *bo, unsigned long msg, + + /* 
remove device reference, if this is our bound device */ + if (bo->bound && bo->ifindex == dev->ifindex) { ++#if IS_ENABLED(CONFIG_PROC_FS) ++ if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read) ++ remove_proc_entry(bo->procname, sock_net(sk)->can.bcmproc_dir); ++#endif + bo->bound = 0; + bo->ifindex = 0; + notify_enodev = 1; +diff --git a/net/core/sock.c b/net/core/sock.c +index 55d85d50b3e49..bc2a4e38dcea8 100644 +--- a/net/core/sock.c ++++ b/net/core/sock.c +@@ -2019,14 +2019,6 @@ int sk_getsockopt(struct sock *sk, int level, int optname, + return 0; + } + +-int sock_getsockopt(struct socket *sock, int level, int optname, +- char __user *optval, int __user *optlen) +-{ +- return sk_getsockopt(sock->sk, level, optname, +- USER_SOCKPTR(optval), +- USER_SOCKPTR(optlen)); +-} +- + /* + * Initialize an sk_lock. + * +diff --git a/net/ipv4/fou_core.c b/net/ipv4/fou_core.c +index b38b82ae903de..e0b8d6b17a34d 100644 +--- a/net/ipv4/fou_core.c ++++ b/net/ipv4/fou_core.c +@@ -50,7 +50,7 @@ struct fou_net { + + static inline struct fou *fou_from_sock(struct sock *sk) + { +- return sk->sk_user_data; ++ return rcu_dereference_sk_user_data(sk); + } + + static int fou_recv_pull(struct sk_buff *skb, struct fou *fou, size_t len) +@@ -233,9 +233,15 @@ static struct sk_buff *fou_gro_receive(struct sock *sk, + struct sk_buff *skb) + { + const struct net_offload __rcu **offloads; +- u8 proto = fou_from_sock(sk)->protocol; ++ struct fou *fou = fou_from_sock(sk); + const struct net_offload *ops; + struct sk_buff *pp = NULL; ++ u8 proto; ++ ++ if (!fou) ++ goto out; ++ ++ proto = fou->protocol; + + /* We can clear the encap_mark for FOU as we are essentially doing + * one of two possible things. We are either adding an L4 tunnel +@@ -263,14 +269,24 @@ static int fou_gro_complete(struct sock *sk, struct sk_buff *skb, + int nhoff) + { + const struct net_offload __rcu **offloads; +- u8 proto = fou_from_sock(sk)->protocol; ++ struct fou *fou = fou_from_sock(sk); + const struct net_offload *ops; +- int err = -ENOSYS; ++ u8 proto; ++ int err; ++ ++ if (!fou) { ++ err = -ENOENT; ++ goto out; ++ } ++ ++ proto = fou->protocol; + + offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; + ops = rcu_dereference(offloads[proto]); +- if (WARN_ON(!ops || !ops->callbacks.gro_complete)) ++ if (WARN_ON(!ops || !ops->callbacks.gro_complete)) { ++ err = -ENOSYS; + goto out; ++ } + + err = ops->callbacks.gro_complete(skb, nhoff); + +@@ -320,6 +336,9 @@ static struct sk_buff *gue_gro_receive(struct sock *sk, + struct gro_remcsum grc; + u8 proto; + ++ if (!fou) ++ goto out; ++ + skb_gro_remcsum_init(&grc); + + off = skb_gro_offset(skb); +diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c +index 53b0d62fd2c2d..fe6178715ba05 100644 +--- a/net/ipv4/tcp_bpf.c ++++ b/net/ipv4/tcp_bpf.c +@@ -577,7 +577,7 @@ static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) + err = sk_stream_error(sk, msg->msg_flags, err); + release_sock(sk); + sk_psock_put(sk, psock); +- return copied ? copied : err; ++ return copied > 0 ? 
copied : err; + } + + enum { +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index 24c7c955dc955..b565c8a8e7bad 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -5880,6 +5880,11 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, + * RFC 5961 4.2 : Send a challenge ack + */ + if (th->syn) { ++ if (sk->sk_state == TCP_SYN_RECV && sk->sk_socket && th->ack && ++ TCP_SKB_CB(skb)->seq + 1 == TCP_SKB_CB(skb)->end_seq && ++ TCP_SKB_CB(skb)->seq + 1 == tp->rcv_nxt && ++ TCP_SKB_CB(skb)->ack_seq == tp->snd_nxt) ++ goto pass; + syn_challenge: + if (syn_inerr) + TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); +@@ -5889,6 +5894,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, + goto discard; + } + ++pass: + bpf_skops_parse_hdr(sk, skb); + + return true; +diff --git a/net/ipv6/ila/ila.h b/net/ipv6/ila/ila.h +index ad5f6f6ba3330..85b92917849bf 100644 +--- a/net/ipv6/ila/ila.h ++++ b/net/ipv6/ila/ila.h +@@ -108,6 +108,7 @@ int ila_lwt_init(void); + void ila_lwt_fini(void); + + int ila_xlat_init_net(struct net *net); ++void ila_xlat_pre_exit_net(struct net *net); + void ila_xlat_exit_net(struct net *net); + + int ila_xlat_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info); +diff --git a/net/ipv6/ila/ila_main.c b/net/ipv6/ila/ila_main.c +index 69caed07315f0..976c78efbae17 100644 +--- a/net/ipv6/ila/ila_main.c ++++ b/net/ipv6/ila/ila_main.c +@@ -71,6 +71,11 @@ static __net_init int ila_init_net(struct net *net) + return err; + } + ++static __net_exit void ila_pre_exit_net(struct net *net) ++{ ++ ila_xlat_pre_exit_net(net); ++} ++ + static __net_exit void ila_exit_net(struct net *net) + { + ila_xlat_exit_net(net); +@@ -78,6 +83,7 @@ static __net_exit void ila_exit_net(struct net *net) + + static struct pernet_operations ila_net_ops = { + .init = ila_init_net, ++ .pre_exit = ila_pre_exit_net, + .exit = ila_exit_net, + .id = &ila_net_id, + .size = sizeof(struct ila_net), +diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c +index 67e8c9440977a..534a4498e280d 100644 +--- a/net/ipv6/ila/ila_xlat.c ++++ b/net/ipv6/ila/ila_xlat.c +@@ -619,6 +619,15 @@ int ila_xlat_init_net(struct net *net) + return 0; + } + ++void ila_xlat_pre_exit_net(struct net *net) ++{ ++ struct ila_net *ilan = net_generic(net, ila_net_id); ++ ++ if (ilan->xlat.hooks_registered) ++ nf_unregister_net_hooks(net, ila_nf_hook_ops, ++ ARRAY_SIZE(ila_nf_hook_ops)); ++} ++ + void ila_xlat_exit_net(struct net *net) + { + struct ila_net *ilan = net_generic(net, ila_net_id); +@@ -626,10 +635,6 @@ void ila_xlat_exit_net(struct net *net) + rhashtable_free_and_destroy(&ilan->xlat.rhash_table, ila_free_cb, NULL); + + free_bucket_spinlocks(ilan->xlat.locks); +- +- if (ilan->xlat.hooks_registered) +- nf_unregister_net_hooks(net, ila_nf_hook_ops, +- ARRAY_SIZE(ila_nf_hook_ops)); + } + + static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila) +diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c +index 5d8ed6c90b7ef..5885810da412f 100644 +--- a/net/netfilter/nf_conncount.c ++++ b/net/netfilter/nf_conncount.c +@@ -321,7 +321,6 @@ insert_tree(struct net *net, + struct nf_conncount_rb *rbconn; + struct nf_conncount_tuple *conn; + unsigned int count = 0, gc_count = 0; +- u8 keylen = data->keylen; + bool do_gc = true; + + spin_lock_bh(&nf_conncount_locks[hash]); +@@ -333,7 +332,7 @@ insert_tree(struct net *net, + rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node); + + parent = *rbnode; +- diff = key_diff(key, rbconn->key, keylen); ++ 
diff = key_diff(key, rbconn->key, data->keylen); + if (diff < 0) { + rbnode = &((*rbnode)->rb_left); + } else if (diff > 0) { +@@ -378,7 +377,7 @@ insert_tree(struct net *net, + + conn->tuple = *tuple; + conn->zone = *zone; +- memcpy(rbconn->key, key, sizeof(u32) * keylen); ++ memcpy(rbconn->key, key, sizeof(u32) * data->keylen); + + nf_conncount_list_init(&rbconn->list); + list_add(&conn->node, &rbconn->list.head); +@@ -403,7 +402,6 @@ count_tree(struct net *net, + struct rb_node *parent; + struct nf_conncount_rb *rbconn; + unsigned int hash; +- u8 keylen = data->keylen; + + hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS; + root = &data->root[hash]; +@@ -414,7 +412,7 @@ count_tree(struct net *net, + + rbconn = rb_entry(parent, struct nf_conncount_rb, node); + +- diff = key_diff(key, rbconn->key, keylen); ++ diff = key_diff(key, rbconn->key, data->keylen); + if (diff < 0) { + parent = rcu_dereference_raw(parent->rb_left); + } else if (diff > 0) { +diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c +index 9cff99558694d..30955dd45779e 100644 +--- a/net/sched/sch_cake.c ++++ b/net/sched/sch_cake.c +@@ -786,12 +786,15 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb, + * queue, accept the collision, update the host tags. + */ + q->way_collisions++; +- if (q->flows[outer_hash + k].set == CAKE_SET_BULK) { +- q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--; +- q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--; +- } + allocate_src = cake_dsrc(flow_mode); + allocate_dst = cake_ddst(flow_mode); ++ ++ if (q->flows[outer_hash + k].set == CAKE_SET_BULK) { ++ if (allocate_src) ++ q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--; ++ if (allocate_dst) ++ q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--; ++ } + found: + /* reserve queue for future packets in same flow */ + reduced_hash = outer_hash + k; +diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c +index 0224a92492453..d36eeb7b05029 100644 +--- a/net/sched/sch_netem.c ++++ b/net/sched/sch_netem.c +@@ -742,11 +742,10 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch) + + err = qdisc_enqueue(skb, q->qdisc, &to_free); + kfree_skb_list(to_free); +- if (err != NET_XMIT_SUCCESS && +- net_xmit_drop_count(err)) { +- qdisc_qstats_drop(sch); +- qdisc_tree_reduce_backlog(sch, 1, +- pkt_len); ++ if (err != NET_XMIT_SUCCESS) { ++ if (net_xmit_drop_count(err)) ++ qdisc_qstats_drop(sch); ++ qdisc_tree_reduce_backlog(sch, 1, pkt_len); + } + goto tfifo_dequeue; + } +diff --git a/net/socket.c b/net/socket.c +index 8d83c4bb163b4..9db33cd4a71b8 100644 +--- a/net/socket.c ++++ b/net/socket.c +@@ -2281,33 +2281,23 @@ static bool sock_use_custom_sol_socket(const struct socket *sock) + return test_bit(SOCK_CUSTOM_SOCKOPT, &sock->flags); + } + +-/* +- * Set a socket option. Because we don't know the option lengths we have +- * to pass the user mode parameter for the protocols to sort out. 
+- */ +-int __sys_setsockopt(int fd, int level, int optname, char __user *user_optval, +- int optlen) ++int do_sock_setsockopt(struct socket *sock, bool compat, int level, ++ int optname, sockptr_t optval, int optlen) + { +- sockptr_t optval = USER_SOCKPTR(user_optval); + const struct proto_ops *ops; + char *kernel_optval = NULL; +- int err, fput_needed; +- struct socket *sock; ++ int err; + + if (optlen < 0) + return -EINVAL; + +- sock = sockfd_lookup_light(fd, &err, &fput_needed); +- if (!sock) +- return err; +- + err = security_socket_setsockopt(sock, level, optname); + if (err) + goto out_put; + +- if (!in_compat_syscall()) ++ if (!compat) + err = BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock->sk, &level, &optname, +- user_optval, &optlen, ++ optval, &optlen, + &kernel_optval); + if (err < 0) + goto out_put; +@@ -2328,6 +2318,27 @@ int __sys_setsockopt(int fd, int level, int optname, char __user *user_optval, + optlen); + kfree(kernel_optval); + out_put: ++ return err; ++} ++EXPORT_SYMBOL(do_sock_setsockopt); ++ ++/* Set a socket option. Because we don't know the option lengths we have ++ * to pass the user mode parameter for the protocols to sort out. ++ */ ++int __sys_setsockopt(int fd, int level, int optname, char __user *user_optval, ++ int optlen) ++{ ++ sockptr_t optval = USER_SOCKPTR(user_optval); ++ bool compat = in_compat_syscall(); ++ int err, fput_needed; ++ struct socket *sock; ++ ++ sock = sockfd_lookup_light(fd, &err, &fput_needed); ++ if (!sock) ++ return err; ++ ++ err = do_sock_setsockopt(sock, compat, level, optname, optval, optlen); ++ + fput_light(sock->file, fput_needed); + return err; + } +@@ -2341,6 +2352,43 @@ SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname, + INDIRECT_CALLABLE_DECLARE(bool tcp_bpf_bypass_getsockopt(int level, + int optname)); + ++int do_sock_getsockopt(struct socket *sock, bool compat, int level, ++ int optname, sockptr_t optval, sockptr_t optlen) ++{ ++ int max_optlen __maybe_unused = 0; ++ const struct proto_ops *ops; ++ int err; ++ ++ err = security_socket_getsockopt(sock, level, optname); ++ if (err) ++ return err; ++ ++ if (!compat) ++ copy_from_sockptr(&max_optlen, optlen, sizeof(int)); ++ ++ ops = READ_ONCE(sock->ops); ++ if (level == SOL_SOCKET) { ++ err = sk_getsockopt(sock->sk, level, optname, optval, optlen); ++ } else if (unlikely(!ops->getsockopt)) { ++ err = -EOPNOTSUPP; ++ } else { ++ if (WARN_ONCE(optval.is_kernel || optlen.is_kernel, ++ "Invalid argument type")) ++ return -EOPNOTSUPP; ++ ++ err = ops->getsockopt(sock, level, optname, optval.user, ++ optlen.user); ++ } ++ ++ if (!compat) ++ err = BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock->sk, level, optname, ++ optval, optlen, max_optlen, ++ err); ++ ++ return err; ++} ++EXPORT_SYMBOL(do_sock_getsockopt); ++ + /* + * Get a socket option. Because we don't know the option lengths we have + * to pass a user mode parameter for the protocols to sort out. 
+@@ -2348,36 +2396,18 @@ INDIRECT_CALLABLE_DECLARE(bool tcp_bpf_bypass_getsockopt(int level,
+ int __sys_getsockopt(int fd, int level, int optname, char __user *optval,
+ int __user *optlen)
+ {
+- int max_optlen __maybe_unused;
+- const struct proto_ops *ops;
+ int err, fput_needed;
+ struct socket *sock;
++ bool compat;
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (!sock)
+ return err;
+
+- err = security_socket_getsockopt(sock, level, optname);
+- if (err)
+- goto out_put;
+-
+- if (!in_compat_syscall())
+- max_optlen = BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen);
++ compat = in_compat_syscall();
++ err = do_sock_getsockopt(sock, compat, level, optname,
++ USER_SOCKPTR(optval), USER_SOCKPTR(optlen));
+
+- ops = READ_ONCE(sock->ops);
+- if (level == SOL_SOCKET)
+- err = sock_getsockopt(sock, level, optname, optval, optlen);
+- else if (unlikely(!ops->getsockopt))
+- err = -EOPNOTSUPP;
+- else
+- err = ops->getsockopt(sock, level, optname, optval,
+- optlen);
+-
+- if (!in_compat_syscall())
+- err = BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock->sk, level, optname,
+- optval, optlen, max_optlen,
+- err);
+-out_put:
+ fput_light(sock->file, fput_needed);
+ return err;
+ }
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index b7f62442d8268..dca4429014db1 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -693,9 +693,6 @@ static void init_peercred(struct sock *sk)
+
+ static void copy_peercred(struct sock *sk, struct sock *peersk)
+ {
+- const struct cred *old_cred;
+- struct pid *old_pid;
+-
+ if (sk < peersk) {
+ spin_lock(&sk->sk_peer_lock);
+ spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
+@@ -703,16 +700,12 @@ static void copy_peercred(struct sock *sk, struct sock *peersk)
+ spin_lock(&peersk->sk_peer_lock);
+ spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
+ }
+- old_pid = sk->sk_peer_pid;
+- old_cred = sk->sk_peer_cred;
++
+ sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
+ sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
+
+ spin_unlock(&sk->sk_peer_lock);
+ spin_unlock(&peersk->sk_peer_lock);
+-
+- put_pid(old_pid);
+- put_cred(old_cred);
+ }
+
+ static int unix_listen(struct socket *sock, int backlog)
+diff --git a/rust/Makefile b/rust/Makefile
+index e5619f25b55ca..333b9a482473d 100644
+--- a/rust/Makefile
++++ b/rust/Makefile
+@@ -363,9 +363,7 @@ $(obj)/bindings/bindings_helpers_generated.rs: $(src)/helpers.c FORCE
+ quiet_cmd_exports = EXPORTS $@
+ cmd_exports = \
+ $(NM) -p --defined-only $< \
+- | grep -E ' (T|R|D) ' | cut -d ' ' -f 3 \
+- | xargs -Isymbol \
+- echo 'EXPORT_SYMBOL_RUST_GPL(symbol);' > $@
++ | awk '/ (T|R|D|B) / {printf "EXPORT_SYMBOL_RUST_GPL(%s);\n",$$3}' > $@
+
+ $(obj)/exports_core_generated.h: $(obj)/core.o FORCE
+ $(call if_changed,exports)
+diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs
+index fdb778e65d79d..e23e7827d756d 100644
+--- a/rust/kernel/types.rs
++++ b/rust/kernel/types.rs
+@@ -248,7 +248,7 @@ pub fn ffi_init(init_func: impl FnOnce(*mut T)) -> impl PinInit<Self> {
+ }
+
+ /// Returns a raw pointer to the opaque data.
+- pub fn get(&self) -> *mut T {
++ pub const fn get(&self) -> *mut T {
+ UnsafeCell::get(&self.value).cast::<T>()
+ }
+
+diff --git a/rust/macros/module.rs b/rust/macros/module.rs
+index acd0393b50957..7dee348ef0cc8 100644
+--- a/rust/macros/module.rs
++++ b/rust/macros/module.rs
+@@ -203,7 +203,11 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
+ // freed until the module is unloaded. 
+ #[cfg(MODULE)]
+ static THIS_MODULE: kernel::ThisModule = unsafe {{
+- kernel::ThisModule::from_ptr(&kernel::bindings::__this_module as *const _ as *mut _)
++ extern \"C\" {{
++ static __this_module: kernel::types::Opaque<kernel::bindings::module>;
++ }}
++
++ kernel::ThisModule::from_ptr(__this_module.get())
+ }};
+ #[cfg(not(MODULE))]
+ static THIS_MODULE: kernel::ThisModule = unsafe {{
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index 98c2bdbfcaed6..4625674f0e95b 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -3769,12 +3769,18 @@ static int smack_unix_stream_connect(struct sock *sock,
+ }
+ }
+
+- /*
+- * Cross reference the peer labels for SO_PEERSEC.
+- */
+ if (rc == 0) {
++ /*
++ * Cross reference the peer labels for SO_PEERSEC.
++ */
+ nsp->smk_packet = ssp->smk_out;
+ ssp->smk_packet = osp->smk_out;
++
++ /*
++ * new/child/established socket must inherit listening socket labels
++ */
++ nsp->smk_out = osp->smk_out;
++ nsp->smk_in = osp->smk_in;
+ }
+
+ return rc;
+diff --git a/sound/core/control.c b/sound/core/control.c
+index 59c8658966d4c..dd4bdb39782cd 100644
+--- a/sound/core/control.c
++++ b/sound/core/control.c
+@@ -1553,12 +1553,16 @@ static int snd_ctl_elem_user_get(struct snd_kcontrol *kcontrol,
+ static int snd_ctl_elem_user_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+- int change;
++ int err, change;
+ struct user_element *ue = kcontrol->private_data;
+ unsigned int size = ue->elem_data_size;
+ char *dst = ue->elem_data +
+ snd_ctl_get_ioff(kcontrol, &ucontrol->id) * size;
+
++ err = sanity_check_input_values(ue->card, ucontrol, &ue->info, false);
++ if (err < 0)
++ return err;
++
+ change = memcmp(&ucontrol->value, dst, size) != 0;
+ if (change)
+ memcpy(dst, &ucontrol->value, size);
+diff --git a/sound/hda/hdmi_chmap.c b/sound/hda/hdmi_chmap.c
+index 5d8e1d944b0af..7b276047f85a7 100644
+--- a/sound/hda/hdmi_chmap.c
++++ b/sound/hda/hdmi_chmap.c
+@@ -753,6 +753,20 @@ static int hdmi_chmap_ctl_get(struct snd_kcontrol *kcontrol,
+ return 0;
+ }
+
++/* a simple sanity check for input values to chmap kcontrol */
++static int chmap_value_check(struct hdac_chmap *hchmap,
++ const struct snd_ctl_elem_value *ucontrol)
++{
++ int i;
++
++ for (i = 0; i < hchmap->channels_max; i++) {
++ if (ucontrol->value.integer.value[i] < 0 ||
++ ucontrol->value.integer.value[i] > SNDRV_CHMAP_LAST)
++ return -EINVAL;
++ }
++ return 0;
++}
++
+ static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+@@ -764,6 +778,10 @@ static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
+ unsigned char chmap[8], per_pin_chmap[8];
+ int i, err, ca, prepared = 0;
+
++ err = chmap_value_check(hchmap, ucontrol);
++ if (err < 0)
++ return err;
++
+ /* No monitor is connected in dyn_pcm_assign. 
+ * It's invalid to setup the chmap + */ +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c +index 8396d1d93668c..63bd0e384bae2 100644 +--- a/sound/pci/hda/patch_conexant.c ++++ b/sound/pci/hda/patch_conexant.c +@@ -311,6 +311,7 @@ enum { + CXT_FIXUP_HEADSET_MIC, + CXT_FIXUP_HP_MIC_NO_PRESENCE, + CXT_PINCFG_SWS_JS201D, ++ CXT_PINCFG_TOP_SPEAKER, + }; + + /* for hda_fixup_thinkpad_acpi() */ +@@ -978,6 +979,13 @@ static const struct hda_fixup cxt_fixups[] = { + .type = HDA_FIXUP_PINS, + .v.pins = cxt_pincfg_sws_js201d, + }, ++ [CXT_PINCFG_TOP_SPEAKER] = { ++ .type = HDA_FIXUP_PINS, ++ .v.pins = (const struct hda_pintbl[]) { ++ { 0x1d, 0x82170111 }, ++ { } ++ }, ++ }, + }; + + static const struct snd_pci_quirk cxt5045_fixups[] = { +@@ -1074,6 +1082,8 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { + SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI), + SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004), + SND_PCI_QUIRK(0x1c06, 0x2012, "Lemote A1205", CXT_PINCFG_LEMOTE_A1205), ++ SND_PCI_QUIRK(0x2782, 0x12c3, "Sirius Gen1", CXT_PINCFG_TOP_SPEAKER), ++ SND_PCI_QUIRK(0x2782, 0x12c5, "Sirius Gen2", CXT_PINCFG_TOP_SPEAKER), + {} + }; + +@@ -1093,6 +1103,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = { + { .id = CXT_FIXUP_HP_MIC_NO_PRESENCE, .name = "hp-mic-fix" }, + { .id = CXT_PINCFG_LENOVO_NOTEBOOK, .name = "lenovo-20149" }, + { .id = CXT_PINCFG_SWS_JS201D, .name = "sws-js201d" }, ++ { .id = CXT_PINCFG_TOP_SPEAKER, .name = "sirius-top-speaker" }, + {} + }; + +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 5736516275a34..6661fed2c2bbf 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -7366,6 +7366,7 @@ enum { + ALC236_FIXUP_HP_GPIO_LED, + ALC236_FIXUP_HP_MUTE_LED, + ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF, ++ ALC236_FIXUP_LENOVO_INV_DMIC, + ALC298_FIXUP_SAMSUNG_AMP, + ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, + ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, +@@ -8922,6 +8923,12 @@ static const struct hda_fixup alc269_fixups[] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc236_fixup_hp_mute_led_micmute_vref, + }, ++ [ALC236_FIXUP_LENOVO_INV_DMIC] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc_fixup_inv_dmic, ++ .chained = true, ++ .chain_id = ALC283_FIXUP_INT_MIC, ++ }, + [ALC298_FIXUP_SAMSUNG_AMP] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc298_fixup_samsung_amp, +@@ -9866,6 +9873,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x87f6, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP), + SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP), ++ SND_PCI_QUIRK(0x103c, 0x87fd, "HP Laptop 14-dq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), + SND_PCI_QUIRK(0x103c, 0x87fe, "HP Laptop 15s-fq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), + SND_PCI_QUIRK(0x103c, 0x8805, "HP ProBook 650 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), +@@ -10298,6 +10306,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x17aa, 0x38f9, "Thinkbook 16P Gen5", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x17aa, 0x38fa, "Thinkbook 16P Gen5", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), ++ SND_PCI_QUIRK(0x17aa, 0x3913, "Lenovo 145", ALC236_FIXUP_LENOVO_INV_DMIC), + 
SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+@@ -10546,6 +10555,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ {.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"},
+ {.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"},
+ {.id = ALC285_FIXUP_HP_GPIO_AMP_INIT, .name = "alc285-hp-amp-init"},
++ {.id = ALC236_FIXUP_LENOVO_INV_DMIC, .name = "alc236-fixup-lenovo-inv-mic"},
+ {}
+ };
+ #define ALC225_STANDARD_PINS \
+diff --git a/sound/soc/codecs/tas2781-fmwlib.c b/sound/soc/codecs/tas2781-fmwlib.c
+index 41ad82a429163..3639dcd0bbb2b 100644
+--- a/sound/soc/codecs/tas2781-fmwlib.c
++++ b/sound/soc/codecs/tas2781-fmwlib.c
+@@ -21,7 +21,7 @@
+ #include 
+ #include 
+ #include 
+-
++#include <asm/unaligned.h>
+
+ #define ERROR_PRAM_CRCCHK 0x0000000
+ #define ERROR_YRAM_CRCCHK 0x0000001
+@@ -125,8 +125,7 @@ static struct tasdevice_config_info *tasdevice_add_config(
+ /* convert data[offset], data[offset + 1], data[offset + 2] and
+ * data[offset + 3] into host
+ */
+- cfg_info->nblocks =
+- be32_to_cpup((__be32 *)&config_data[config_offset]);
++ cfg_info->nblocks = get_unaligned_be32(&config_data[config_offset]);
+ config_offset += 4;
+
+ /* Several kinds of dsp/algorithm firmwares can run on tas2781,
+@@ -170,14 +169,14 @@ static struct tasdevice_config_info *tasdevice_add_config(
+
+ }
+ bk_da[i]->yram_checksum =
+- be16_to_cpup((__be16 *)&config_data[config_offset]);
++ get_unaligned_be16(&config_data[config_offset]);
+ config_offset += 2;
+ bk_da[i]->block_size =
+- be32_to_cpup((__be32 *)&config_data[config_offset]);
++ get_unaligned_be32(&config_data[config_offset]);
+ config_offset += 4;
+
+ bk_da[i]->n_subblks =
+- be32_to_cpup((__be32 *)&config_data[config_offset]);
++ get_unaligned_be32(&config_data[config_offset]);
+
+ config_offset += 4;
+
+@@ -227,7 +226,7 @@ int tasdevice_rca_parser(void *context, const struct firmware *fmw)
+ }
+ buf = (unsigned char *)fmw->data;
+
+- fw_hdr->img_sz = be32_to_cpup((__be32 *)&buf[offset]);
++ fw_hdr->img_sz = get_unaligned_be32(&buf[offset]);
+ offset += 4;
+ if (fw_hdr->img_sz != fmw->size) {
+ dev_err(tas_priv->dev,
+@@ -238,9 +237,9 @@ int tasdevice_rca_parser(void *context, const struct firmware *fmw)
+ goto out;
+ }
+
+- fw_hdr->checksum = be32_to_cpup((__be32 *)&buf[offset]);
++ fw_hdr->checksum = get_unaligned_be32(&buf[offset]);
+ offset += 4;
+- fw_hdr->binary_version_num = be32_to_cpup((__be32 *)&buf[offset]);
++ fw_hdr->binary_version_num = get_unaligned_be32(&buf[offset]);
+ if (fw_hdr->binary_version_num < 0x103) {
+ dev_err(tas_priv->dev, "File version 0x%04x is too low",
+ fw_hdr->binary_version_num);
+@@ -249,7 +248,7 @@ int tasdevice_rca_parser(void *context, const struct firmware *fmw)
+ goto out;
+ }
+ offset += 4;
+- fw_hdr->drv_fw_version = be32_to_cpup((__be32 *)&buf[offset]);
++ fw_hdr->drv_fw_version = get_unaligned_be32(&buf[offset]);
+ offset += 8;
+ fw_hdr->plat_type = buf[offset];
+ offset += 1;
+@@ -277,11 +276,11 @@ int tasdevice_rca_parser(void *context, const struct firmware *fmw)
+ for (i = 0; i < TASDEVICE_DEVICE_SUM; i++, offset++)
+ fw_hdr->devs[i] = buf[offset];
+
+- fw_hdr->nconfig = be32_to_cpup((__be32 *)&buf[offset]);
++ fw_hdr->nconfig = get_unaligned_be32(&buf[offset]);
+ offset += 4;
+
+ for (i = 0; i < TASDEVICE_CONFIG_SUM; i++) {
+- fw_hdr->config_size[i] = 
be32_to_cpup((__be32 *)&buf[offset]); ++ fw_hdr->config_size[i] = get_unaligned_be32(&buf[offset]); + offset += 4; + total_config_sz += fw_hdr->config_size[i]; + } +@@ -330,7 +329,7 @@ static int fw_parse_block_data_kernel(struct tasdevice_fw *tas_fmw, + /* convert data[offset], data[offset + 1], data[offset + 2] and + * data[offset + 3] into host + */ +- block->type = be32_to_cpup((__be32 *)&data[offset]); ++ block->type = get_unaligned_be32(&data[offset]); + offset += 4; + + block->is_pchksum_present = data[offset]; +@@ -345,10 +344,10 @@ static int fw_parse_block_data_kernel(struct tasdevice_fw *tas_fmw, + block->ychksum = data[offset]; + offset++; + +- block->blk_size = be32_to_cpup((__be32 *)&data[offset]); ++ block->blk_size = get_unaligned_be32(&data[offset]); + offset += 4; + +- block->nr_subblocks = be32_to_cpup((__be32 *)&data[offset]); ++ block->nr_subblocks = get_unaligned_be32(&data[offset]); + offset += 4; + + if (offset + block->blk_size > fmw->size) { +@@ -381,7 +380,7 @@ static int fw_parse_data_kernel(struct tasdevice_fw *tas_fmw, + offset = -EINVAL; + goto out; + } +- img_data->nr_blk = be32_to_cpup((__be32 *)&data[offset]); ++ img_data->nr_blk = get_unaligned_be32(&data[offset]); + offset += 4; + + img_data->dev_blks = kcalloc(img_data->nr_blk, +@@ -477,14 +476,14 @@ static int fw_parse_variable_header_kernel( + offset = -EINVAL; + goto out; + } +- fw_hdr->device_family = be16_to_cpup((__be16 *)&buf[offset]); ++ fw_hdr->device_family = get_unaligned_be16(&buf[offset]); + if (fw_hdr->device_family != 0) { + dev_err(tas_priv->dev, "%s:not TAS device\n", __func__); + offset = -EINVAL; + goto out; + } + offset += 2; +- fw_hdr->device = be16_to_cpup((__be16 *)&buf[offset]); ++ fw_hdr->device = get_unaligned_be16(&buf[offset]); + if (fw_hdr->device >= TASDEVICE_DSP_TAS_MAX_DEVICE || + fw_hdr->device == 6) { + dev_err(tas_priv->dev, "Unsupported dev %d\n", fw_hdr->device); +@@ -502,7 +501,7 @@ static int fw_parse_variable_header_kernel( + goto out; + } + +- tas_fmw->nr_programs = be32_to_cpup((__be32 *)&buf[offset]); ++ tas_fmw->nr_programs = get_unaligned_be32(&buf[offset]); + offset += 4; + + if (tas_fmw->nr_programs == 0 || tas_fmw->nr_programs > +@@ -521,14 +520,14 @@ static int fw_parse_variable_header_kernel( + + for (i = 0; i < tas_fmw->nr_programs; i++) { + program = &(tas_fmw->programs[i]); +- program->prog_size = be32_to_cpup((__be32 *)&buf[offset]); ++ program->prog_size = get_unaligned_be32(&buf[offset]); + offset += 4; + } + + /* Skip the unused prog_size */ + offset += 4 * (TASDEVICE_MAXPROGRAM_NUM_KERNEL - tas_fmw->nr_programs); + +- tas_fmw->nr_configurations = be32_to_cpup((__be32 *)&buf[offset]); ++ tas_fmw->nr_configurations = get_unaligned_be32(&buf[offset]); + offset += 4; + + /* The max number of config in firmware greater than 4 pieces of +@@ -560,7 +559,7 @@ static int fw_parse_variable_header_kernel( + + for (i = 0; i < tas_fmw->nr_programs; i++) { + config = &(tas_fmw->configs[i]); +- config->cfg_size = be32_to_cpup((__be32 *)&buf[offset]); ++ config->cfg_size = get_unaligned_be32(&buf[offset]); + offset += 4; + } + +@@ -598,7 +597,7 @@ static int tasdevice_process_block(void *context, unsigned char *data, + switch (subblk_typ) { + case TASDEVICE_CMD_SING_W: { + int i; +- unsigned short len = be16_to_cpup((__be16 *)&data[2]); ++ unsigned short len = get_unaligned_be16(&data[2]); + + subblk_offset += 2; + if (subblk_offset + 4 * len > sublocksize) { +@@ -624,7 +623,7 @@ static int tasdevice_process_block(void *context, unsigned char *data, + } + break; 
+ case TASDEVICE_CMD_BURST: { +- unsigned short len = be16_to_cpup((__be16 *)&data[2]); ++ unsigned short len = get_unaligned_be16(&data[2]); + + subblk_offset += 2; + if (subblk_offset + 4 + len > sublocksize) { +@@ -665,7 +664,7 @@ static int tasdevice_process_block(void *context, unsigned char *data, + is_err = true; + break; + } +- sleep_time = be16_to_cpup((__be16 *)&data[2]) * 1000; ++ sleep_time = get_unaligned_be16(&data[2]) * 1000; + usleep_range(sleep_time, sleep_time + 50); + subblk_offset += 2; + } +@@ -940,7 +939,7 @@ static int fw_parse_variable_hdr(struct tasdevice_priv + + offset += len; + +- fw_hdr->device_family = be32_to_cpup((__be32 *)&buf[offset]); ++ fw_hdr->device_family = get_unaligned_be32(&buf[offset]); + if (fw_hdr->device_family != 0) { + dev_err(tas_priv->dev, "%s: not TAS device\n", __func__); + offset = -EINVAL; +@@ -948,7 +947,7 @@ static int fw_parse_variable_hdr(struct tasdevice_priv + } + offset += 4; + +- fw_hdr->device = be32_to_cpup((__be32 *)&buf[offset]); ++ fw_hdr->device = get_unaligned_be32(&buf[offset]); + if (fw_hdr->device >= TASDEVICE_DSP_TAS_MAX_DEVICE || + fw_hdr->device == 6) { + dev_err(tas_priv->dev, "Unsupported dev %d\n", fw_hdr->device); +@@ -993,7 +992,7 @@ static int fw_parse_block_data(struct tasdevice_fw *tas_fmw, + offset = -EINVAL; + goto out; + } +- block->type = be32_to_cpup((__be32 *)&data[offset]); ++ block->type = get_unaligned_be32(&data[offset]); + offset += 4; + + if (tas_fmw->fw_hdr.fixed_hdr.drv_ver >= PPC_DRIVER_CRCCHK) { +@@ -1018,7 +1017,7 @@ static int fw_parse_block_data(struct tasdevice_fw *tas_fmw, + block->is_ychksum_present = 0; + } + +- block->nr_cmds = be32_to_cpup((__be32 *)&data[offset]); ++ block->nr_cmds = get_unaligned_be32(&data[offset]); + offset += 4; + + n = block->nr_cmds * 4; +@@ -1069,7 +1068,7 @@ static int fw_parse_data(struct tasdevice_fw *tas_fmw, + goto out; + } + offset += n; +- img_data->nr_blk = be16_to_cpup((__be16 *)&data[offset]); ++ img_data->nr_blk = get_unaligned_be16(&data[offset]); + offset += 2; + + img_data->dev_blks = kcalloc(img_data->nr_blk, +@@ -1106,7 +1105,7 @@ static int fw_parse_program_data(struct tasdevice_priv *tas_priv, + offset = -EINVAL; + goto out; + } +- tas_fmw->nr_programs = be16_to_cpup((__be16 *)&buf[offset]); ++ tas_fmw->nr_programs = get_unaligned_be16(&buf[offset]); + offset += 2; + + if (tas_fmw->nr_programs == 0) { +@@ -1173,7 +1172,7 @@ static int fw_parse_configuration_data( + offset = -EINVAL; + goto out; + } +- tas_fmw->nr_configurations = be16_to_cpup((__be16 *)&data[offset]); ++ tas_fmw->nr_configurations = get_unaligned_be16(&data[offset]); + offset += 2; + + if (tas_fmw->nr_configurations == 0) { +@@ -1805,7 +1804,7 @@ static int fw_parse_header(struct tasdevice_priv *tas_priv, + /* Convert data[offset], data[offset + 1], data[offset + 2] and + * data[offset + 3] into host + */ +- fw_fixed_hdr->fwsize = be32_to_cpup((__be32 *)&buf[offset]); ++ fw_fixed_hdr->fwsize = get_unaligned_be32(&buf[offset]); + offset += 4; + if (fw_fixed_hdr->fwsize != fmw->size) { + dev_err(tas_priv->dev, "File size not match, %lu %u", +@@ -1814,9 +1813,9 @@ static int fw_parse_header(struct tasdevice_priv *tas_priv, + goto out; + } + offset += 4; +- fw_fixed_hdr->ppcver = be32_to_cpup((__be32 *)&buf[offset]); ++ fw_fixed_hdr->ppcver = get_unaligned_be32(&buf[offset]); + offset += 8; +- fw_fixed_hdr->drv_ver = be32_to_cpup((__be32 *)&buf[offset]); ++ fw_fixed_hdr->drv_ver = get_unaligned_be32(&buf[offset]); + offset += 72; + + out: +@@ -1858,7 +1857,7 @@ static int 
fw_parse_calibration_data(struct tasdevice_priv *tas_priv, + offset = -EINVAL; + goto out; + } +- tas_fmw->nr_calibrations = be16_to_cpup((__be16 *)&data[offset]); ++ tas_fmw->nr_calibrations = get_unaligned_be16(&data[offset]); + offset += 2; + + if (tas_fmw->nr_calibrations != 1) { +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c +index 85e3bbf7e5f0e..7729f8f4d5e61 100644 +--- a/sound/soc/soc-dapm.c ++++ b/sound/soc/soc-dapm.c +@@ -4018,6 +4018,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w, + + case SND_SOC_DAPM_POST_PMD: + kfree(substream->runtime); ++ substream->runtime = NULL; + break; + + default: +diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c +index 7e8fca0b06628..a643ef654b9d7 100644 +--- a/sound/soc/soc-topology.c ++++ b/sound/soc/soc-topology.c +@@ -851,6 +851,8 @@ static int soc_tplg_denum_create_values(struct soc_tplg *tplg, struct soc_enum * + se->dobj.control.dvalues[i] = le32_to_cpu(ec->values[i]); + } + ++ se->items = le32_to_cpu(ec->items); ++ se->values = (const unsigned int *)se->dobj.control.dvalues; + return 0; + } + +diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c +index 7133ec13322b3..cf1e63daad86b 100644 +--- a/sound/soc/sof/topology.c ++++ b/sound/soc/sof/topology.c +@@ -2040,6 +2040,8 @@ static int sof_link_unload(struct snd_soc_component *scomp, struct snd_soc_dobj + if (!slink) + return 0; + ++ slink->link->platforms->name = NULL; ++ + kfree(slink->tuples); + list_del(&slink->list); + kfree(slink->hw_configs); +diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c +index 5124b6c9ceb4b..d1cb49d54f008 100644 +--- a/sound/soc/sunxi/sun4i-i2s.c ++++ b/sound/soc/sunxi/sun4i-i2s.c +@@ -100,8 +100,8 @@ + #define SUN8I_I2S_CTRL_MODE_PCM (0 << 4) + + #define SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK BIT(19) +-#define SUN8I_I2S_FMT0_LRCLK_POLARITY_INVERTED (1 << 19) +-#define SUN8I_I2S_FMT0_LRCLK_POLARITY_NORMAL (0 << 19) ++#define SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH (1 << 19) ++#define SUN8I_I2S_FMT0_LRCLK_POLARITY_START_LOW (0 << 19) + #define SUN8I_I2S_FMT0_LRCK_PERIOD_MASK GENMASK(17, 8) + #define SUN8I_I2S_FMT0_LRCK_PERIOD(period) ((period - 1) << 8) + #define SUN8I_I2S_FMT0_BCLK_POLARITY_MASK BIT(7) +@@ -727,65 +727,37 @@ static int sun4i_i2s_set_soc_fmt(const struct sun4i_i2s *i2s, + static int sun8i_i2s_set_soc_fmt(const struct sun4i_i2s *i2s, + unsigned int fmt) + { +- u32 mode, val; ++ u32 mode, lrclk_pol, bclk_pol, val; + u8 offset; + +- /* +- * DAI clock polarity +- * +- * The setup for LRCK contradicts the datasheet, but under a +- * scope it's clear that the LRCK polarity is reversed +- * compared to the expected polarity on the bus. 
+- */ +- switch (fmt & SND_SOC_DAIFMT_INV_MASK) { +- case SND_SOC_DAIFMT_IB_IF: +- /* Invert both clocks */ +- val = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED; +- break; +- case SND_SOC_DAIFMT_IB_NF: +- /* Invert bit clock */ +- val = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED | +- SUN8I_I2S_FMT0_LRCLK_POLARITY_INVERTED; +- break; +- case SND_SOC_DAIFMT_NB_IF: +- /* Invert frame clock */ +- val = 0; +- break; +- case SND_SOC_DAIFMT_NB_NF: +- val = SUN8I_I2S_FMT0_LRCLK_POLARITY_INVERTED; +- break; +- default: +- return -EINVAL; +- } +- +- regmap_update_bits(i2s->regmap, SUN4I_I2S_FMT0_REG, +- SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK | +- SUN8I_I2S_FMT0_BCLK_POLARITY_MASK, +- val); +- + /* DAI Mode */ + switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { + case SND_SOC_DAIFMT_DSP_A: ++ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH; + mode = SUN8I_I2S_CTRL_MODE_PCM; + offset = 1; + break; + + case SND_SOC_DAIFMT_DSP_B: ++ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH; + mode = SUN8I_I2S_CTRL_MODE_PCM; + offset = 0; + break; + + case SND_SOC_DAIFMT_I2S: ++ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_LOW; + mode = SUN8I_I2S_CTRL_MODE_LEFT; + offset = 1; + break; + + case SND_SOC_DAIFMT_LEFT_J: ++ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH; + mode = SUN8I_I2S_CTRL_MODE_LEFT; + offset = 0; + break; + + case SND_SOC_DAIFMT_RIGHT_J: ++ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH; + mode = SUN8I_I2S_CTRL_MODE_RIGHT; + offset = 0; + break; +@@ -803,6 +775,35 @@ static int sun8i_i2s_set_soc_fmt(const struct sun4i_i2s *i2s, + SUN8I_I2S_TX_CHAN_OFFSET_MASK, + SUN8I_I2S_TX_CHAN_OFFSET(offset)); + ++ /* DAI clock polarity */ ++ bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_NORMAL; ++ ++ switch (fmt & SND_SOC_DAIFMT_INV_MASK) { ++ case SND_SOC_DAIFMT_IB_IF: ++ /* Invert both clocks */ ++ lrclk_pol ^= SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK; ++ bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED; ++ break; ++ case SND_SOC_DAIFMT_IB_NF: ++ /* Invert bit clock */ ++ bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED; ++ break; ++ case SND_SOC_DAIFMT_NB_IF: ++ /* Invert frame clock */ ++ lrclk_pol ^= SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK; ++ break; ++ case SND_SOC_DAIFMT_NB_NF: ++ /* No inversion */ ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ regmap_update_bits(i2s->regmap, SUN4I_I2S_FMT0_REG, ++ SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK | ++ SUN8I_I2S_FMT0_BCLK_POLARITY_MASK, ++ lrclk_pol | bclk_pol); ++ + /* DAI clock master masks */ + switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) { + case SND_SOC_DAIFMT_BP_FP: +@@ -834,65 +835,37 @@ static int sun8i_i2s_set_soc_fmt(const struct sun4i_i2s *i2s, + static int sun50i_h6_i2s_set_soc_fmt(const struct sun4i_i2s *i2s, + unsigned int fmt) + { +- u32 mode, val; ++ u32 mode, lrclk_pol, bclk_pol, val; + u8 offset; + +- /* +- * DAI clock polarity +- * +- * The setup for LRCK contradicts the datasheet, but under a +- * scope it's clear that the LRCK polarity is reversed +- * compared to the expected polarity on the bus. 
+- */ +- switch (fmt & SND_SOC_DAIFMT_INV_MASK) { +- case SND_SOC_DAIFMT_IB_IF: +- /* Invert both clocks */ +- val = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED; +- break; +- case SND_SOC_DAIFMT_IB_NF: +- /* Invert bit clock */ +- val = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED | +- SUN8I_I2S_FMT0_LRCLK_POLARITY_INVERTED; +- break; +- case SND_SOC_DAIFMT_NB_IF: +- /* Invert frame clock */ +- val = 0; +- break; +- case SND_SOC_DAIFMT_NB_NF: +- val = SUN8I_I2S_FMT0_LRCLK_POLARITY_INVERTED; +- break; +- default: +- return -EINVAL; +- } +- +- regmap_update_bits(i2s->regmap, SUN4I_I2S_FMT0_REG, +- SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK | +- SUN8I_I2S_FMT0_BCLK_POLARITY_MASK, +- val); +- + /* DAI Mode */ + switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { + case SND_SOC_DAIFMT_DSP_A: ++ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH; + mode = SUN8I_I2S_CTRL_MODE_PCM; + offset = 1; + break; + + case SND_SOC_DAIFMT_DSP_B: ++ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH; + mode = SUN8I_I2S_CTRL_MODE_PCM; + offset = 0; + break; + + case SND_SOC_DAIFMT_I2S: ++ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_LOW; + mode = SUN8I_I2S_CTRL_MODE_LEFT; + offset = 1; + break; + + case SND_SOC_DAIFMT_LEFT_J: ++ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH; + mode = SUN8I_I2S_CTRL_MODE_LEFT; + offset = 0; + break; + + case SND_SOC_DAIFMT_RIGHT_J: ++ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH; + mode = SUN8I_I2S_CTRL_MODE_RIGHT; + offset = 0; + break; +@@ -910,6 +883,36 @@ static int sun50i_h6_i2s_set_soc_fmt(const struct sun4i_i2s *i2s, + SUN50I_H6_I2S_TX_CHAN_SEL_OFFSET_MASK, + SUN50I_H6_I2S_TX_CHAN_SEL_OFFSET(offset)); + ++ /* DAI clock polarity */ ++ bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_NORMAL; ++ ++ switch (fmt & SND_SOC_DAIFMT_INV_MASK) { ++ case SND_SOC_DAIFMT_IB_IF: ++ /* Invert both clocks */ ++ lrclk_pol ^= SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK; ++ bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED; ++ break; ++ case SND_SOC_DAIFMT_IB_NF: ++ /* Invert bit clock */ ++ bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED; ++ break; ++ case SND_SOC_DAIFMT_NB_IF: ++ /* Invert frame clock */ ++ lrclk_pol ^= SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK; ++ break; ++ case SND_SOC_DAIFMT_NB_NF: ++ /* No inversion */ ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ regmap_update_bits(i2s->regmap, SUN4I_I2S_FMT0_REG, ++ SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK | ++ SUN8I_I2S_FMT0_BCLK_POLARITY_MASK, ++ lrclk_pol | bclk_pol); ++ ++ + /* DAI clock master masks */ + switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) { + case SND_SOC_DAIFMT_BP_FP: +diff --git a/sound/soc/tegra/tegra210_ahub.c b/sound/soc/tegra/tegra210_ahub.c +index 3f114a2adfced..ab3c6b2544d20 100644 +--- a/sound/soc/tegra/tegra210_ahub.c ++++ b/sound/soc/tegra/tegra210_ahub.c +@@ -2,7 +2,7 @@ + // + // tegra210_ahub.c - Tegra210 AHUB driver + // +-// Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. ++// Copyright (c) 2020-2024, NVIDIA CORPORATION. All rights reserved. 
+ + #include + #include +@@ -1391,11 +1391,13 @@ static int tegra_ahub_probe(struct platform_device *pdev) + return err; + } + ++ pm_runtime_enable(&pdev->dev); ++ + err = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); +- if (err) ++ if (err) { ++ pm_runtime_disable(&pdev->dev); + return err; +- +- pm_runtime_enable(&pdev->dev); ++ } + + return 0; + } +diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c +index de35b9a21dad7..ceed16a10285a 100644 +--- a/tools/lib/bpf/libbpf.c ++++ b/tools/lib/bpf/libbpf.c +@@ -9753,7 +9753,7 @@ __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i) + struct bpf_map * + bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev) + { +- if (prev == NULL) ++ if (prev == NULL && obj != NULL) + return obj->maps; + + return __bpf_map__iter(prev, obj, 1); +@@ -9762,7 +9762,7 @@ bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev) + struct bpf_map * + bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next) + { +- if (next == NULL) { ++ if (next == NULL && obj != NULL) { + if (!obj->nr_maps) + return NULL; + return obj->maps + obj->nr_maps - 1; +diff --git a/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c b/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c +index 890a8236a8ba7..2809f9a25c433 100644 +--- a/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c ++++ b/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c +@@ -28,9 +28,11 @@ static int check_vgem(int fd) + version.name = name; + + ret = ioctl(fd, DRM_IOCTL_VERSION, &version); +- if (ret) ++ if (ret || version.name_len != 4) + return 0; + ++ name[4] = '\0'; ++ + return !strcmp(name, "vgem"); + } + +diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile +index d417de1051233..91a48efb140be 100644 +--- a/tools/testing/selftests/net/Makefile ++++ b/tools/testing/selftests/net/Makefile +@@ -85,7 +85,8 @@ TEST_GEN_FILES += csum + TEST_GEN_FILES += nat6to4.o + TEST_GEN_FILES += xdp_dummy.o + TEST_GEN_FILES += ip_local_port_range +-TEST_GEN_FILES += bind_wildcard ++TEST_GEN_PROGS += bind_wildcard ++TEST_GEN_PROGS += bind_timewait + TEST_PROGS += test_vxlan_mdb.sh + TEST_PROGS += test_bridge_neigh_suppress.sh + TEST_PROGS += test_vxlan_nolocalbypass.sh