From 65cc5540d2d438a724eabed35d9890a5dfd2753e Mon Sep 17 00:00:00 2001 From: Julian Sikorski Date: Sun, 6 Oct 2024 11:45:31 +0000 Subject: [PATCH] Update odroidxu4-current to 6.6.54 --- .../odroidxu4-6.6/patch-6.6.51-52.patch | 2980 +++ .../odroidxu4-6.6/patch-6.6.52-53.patch | 2051 ++ .../odroidxu4-6.6/patch-6.6.53-54.patch | 22341 ++++++++++++++++ 3 files changed, 27372 insertions(+) create mode 100644 patch/kernel/archive/odroidxu4-6.6/patch-6.6.51-52.patch create mode 100644 patch/kernel/archive/odroidxu4-6.6/patch-6.6.52-53.patch create mode 100644 patch/kernel/archive/odroidxu4-6.6/patch-6.6.53-54.patch diff --git a/patch/kernel/archive/odroidxu4-6.6/patch-6.6.51-52.patch b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.51-52.patch new file mode 100644 index 000000000000..2120602259d6 --- /dev/null +++ b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.51-52.patch @@ -0,0 +1,2980 @@ +diff --git a/Makefile b/Makefile +index 6dea0c21636820..5b22e3ff440ca1 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 6 +-SUBLEVEL = 51 ++SUBLEVEL = 52 + EXTRAVERSION = + NAME = Hurr durr I'ma ninja sloth + +diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts +index d9905a08c6ce86..66443d52cd34d8 100644 +--- a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts ++++ b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts +@@ -332,7 +332,7 @@ led_pin: led-pin { + + pmic { + pmic_int_l: pmic-int-l { +- rockchip,pins = <2 RK_PA6 RK_FUNC_GPIO &pcfg_pull_up>; ++ rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; + +diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi +index f2ca5d30d223ce..aba2748fe54c77 100644 +--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi ++++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi +@@ -119,6 +119,22 @@ &emmc_phy { + drive-impedance-ohm = <33>; + }; + ++&gpio3 { ++ /* ++ * The Qseven BIOS_DISABLE signal on the RK3399-Q7 keeps the on-module ++ * eMMC and SPI flash powered-down initially (in fact it keeps the ++ * reset signal asserted). BIOS_DISABLE_OVERRIDE pin allows to override ++ * that signal so that eMMC and SPI can be used regardless of the state ++ * of the signal. ++ */ ++ bios-disable-override-hog { ++ gpios = ; ++ gpio-hog; ++ line-name = "bios_disable_override"; ++ output-high; ++ }; ++}; ++ + &gmac { + assigned-clocks = <&cru SCLK_RMII_SRC>; + assigned-clock-parents = <&clkin_gmac>; +@@ -374,6 +390,7 @@ vdd_cpu_b: regulator@60 { + + &i2s0 { + pinctrl-0 = <&i2s0_2ch_bus>; ++ pinctrl-1 = <&i2s0_2ch_bus_bclk_off>; + rockchip,playback-channels = <2>; + rockchip,capture-channels = <2>; + status = "okay"; +@@ -382,8 +399,8 @@ &i2s0 { + /* + * As Q7 does not specify neither a global nor a RX clock for I2S these + * signals are not used. Furthermore I2S0_LRCK_RX is used as GPIO. +- * Therefore we have to redefine the i2s0_2ch_bus definition to prevent +- * conflicts. ++ * Therefore we have to redefine the i2s0_2ch_bus and i2s0_2ch_bus_bclk_off ++ * definitions to prevent conflicts. 
+ */ + &i2s0_2ch_bus { + rockchip,pins = +@@ -393,6 +410,14 @@ &i2s0_2ch_bus { + <3 RK_PD7 1 &pcfg_pull_none>; + }; + ++&i2s0_2ch_bus_bclk_off { ++ rockchip,pins = ++ <3 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>, ++ <3 RK_PD2 1 &pcfg_pull_none>, ++ <3 RK_PD3 1 &pcfg_pull_none>, ++ <3 RK_PD7 1 &pcfg_pull_none>; ++}; ++ + &io_domains { + status = "okay"; + bt656-supply = <&vcc_1v8>; +@@ -408,9 +433,14 @@ &pcie_clkreqn_cpm { + + &pinctrl { + pinctrl-names = "default"; +- pinctrl-0 = <&q7_thermal_pin>; ++ pinctrl-0 = <&q7_thermal_pin &bios_disable_override_hog_pin>; + + gpios { ++ bios_disable_override_hog_pin: bios-disable-override-hog-pin { ++ rockchip,pins = ++ <3 RK_PD5 RK_FUNC_GPIO &pcfg_pull_down>; ++ }; ++ + q7_thermal_pin: q7-thermal-pin { + rockchip,pins = + <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>; +diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c +index 20f72cd1d8138d..03eaad5949f141 100644 +--- a/arch/powerpc/kernel/setup-common.c ++++ b/arch/powerpc/kernel/setup-common.c +@@ -950,6 +950,7 @@ void __init setup_arch(char **cmdline_p) + mem_topology_setup(); + /* Set max_mapnr before paging_init() */ + set_max_mapnr(max_pfn); ++ high_memory = (void *)__va(max_low_pfn * PAGE_SIZE); + + /* + * Release secondary cpus out of their spinloops at 0x60 now that +diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c +index 07e8f4f1e07f89..9dbef559af4cbf 100644 +--- a/arch/powerpc/mm/mem.c ++++ b/arch/powerpc/mm/mem.c +@@ -287,8 +287,6 @@ void __init mem_init(void) + swiotlb_init(ppc_swiotlb_enable, ppc_swiotlb_flags); + #endif + +- high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); +- + kasan_late_init(); + + memblock_free_all(); +diff --git a/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi b/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi +index 062b97c6e7dff4..4874e3bb42ab10 100644 +--- a/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi ++++ b/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi +@@ -204,6 +204,8 @@ &i2c6 { + + &mmc0 { + max-frequency = <100000000>; ++ assigned-clocks = <&syscrg JH7110_SYSCLK_SDIO0_SDCARD>; ++ assigned-clock-rates = <50000000>; + bus-width = <8>; + cap-mmc-highspeed; + mmc-ddr-1_8v; +@@ -220,6 +222,8 @@ &mmc0 { + + &mmc1 { + max-frequency = <100000000>; ++ assigned-clocks = <&syscrg JH7110_SYSCLK_SDIO1_SDCARD>; ++ assigned-clock-rates = <50000000>; + bus-width = <4>; + no-sdio; + no-mmc; +diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c +index 8f3a4d16bb791f..d1e2d12279e268 100644 +--- a/arch/x86/hyperv/hv_init.c ++++ b/arch/x86/hyperv/hv_init.c +@@ -35,7 +35,6 @@ + #include + #include + +-int hyperv_init_cpuhp; + u64 hv_current_partition_id = ~0ull; + EXPORT_SYMBOL_GPL(hv_current_partition_id); + +@@ -607,8 +606,6 @@ void __init hyperv_init(void) + + register_syscore_ops(&hv_syscore_ops); + +- hyperv_init_cpuhp = cpuhp; +- + if (cpuid_ebx(HYPERV_CPUID_FEATURES) & HV_ACCESS_PARTITION_ID) + hv_get_partition_id(); + +@@ -637,7 +634,7 @@ void __init hyperv_init(void) + clean_guest_os_id: + wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0); + hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, 0); +- cpuhp_remove_state(cpuhp); ++ cpuhp_remove_state(CPUHP_AP_HYPERV_ONLINE); + free_ghcb_page: + free_percpu(hv_ghcb_pg); + free_vp_assist_page: +diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h +index 896445edc6a8e9..ec95d6e9f1682c 100644 +--- a/arch/x86/include/asm/mshyperv.h ++++ b/arch/x86/include/asm/mshyperv.h +@@ -40,7 +40,6 @@ static inline unsigned 
char hv_get_nmi_reason(void) + } + + #if IS_ENABLED(CONFIG_HYPERV) +-extern int hyperv_init_cpuhp; + extern bool hyperv_paravisor_present; + + extern void *hv_hypercall_pg; +diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c +index e6bba12c759cb7..fac4b4116efe10 100644 +--- a/arch/x86/kernel/cpu/mshyperv.c ++++ b/arch/x86/kernel/cpu/mshyperv.c +@@ -199,8 +199,8 @@ static void hv_machine_shutdown(void) + * Call hv_cpu_die() on all the CPUs, otherwise later the hypervisor + * corrupts the old VP Assist Pages and can crash the kexec kernel. + */ +- if (kexec_in_progress && hyperv_init_cpuhp > 0) +- cpuhp_remove_state(hyperv_init_cpuhp); ++ if (kexec_in_progress) ++ cpuhp_remove_state(CPUHP_AP_HYPERV_ONLINE); + + /* The function calls stop_other_cpus(). */ + native_machine_shutdown(); +diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h +index 59c7f88b915a43..edb46123e3eb0f 100644 +--- a/drivers/cxl/cxlmem.h ++++ b/drivers/cxl/cxlmem.h +@@ -538,7 +538,7 @@ enum cxl_opcode { + 0x3b, 0x3f, 0x17) + + #define DEFINE_CXL_VENDOR_DEBUG_UUID \ +- UUID_INIT(0xe1819d9, 0x11a9, 0x400c, 0x81, 0x1f, 0xd6, 0x07, 0x19, \ ++ UUID_INIT(0x5e1819d9, 0x11a9, 0x400c, 0x81, 0x1f, 0xd6, 0x07, 0x19, \ + 0x40, 0x3d, 0x86) + + struct cxl_mbox_get_supported_logs { +diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c +index ee899f8e67215f..bea7e574f916e1 100644 +--- a/drivers/dma-buf/heaps/cma_heap.c ++++ b/drivers/dma-buf/heaps/cma_heap.c +@@ -165,7 +165,7 @@ static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf) + struct vm_area_struct *vma = vmf->vma; + struct cma_heap_buffer *buffer = vma->vm_private_data; + +- if (vmf->pgoff > buffer->pagecount) ++ if (vmf->pgoff >= buffer->pagecount) + return VM_FAULT_SIGBUS; + + vmf->page = buffer->pages[vmf->pgoff]; +diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c +index 77595e9622da34..7ac0228fe532ee 100644 +--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c +@@ -23,6 +23,7 @@ + + #include "amdgpu.h" + #include "amdgpu_jpeg.h" ++#include "amdgpu_cs.h" + #include "soc15.h" + #include "soc15d.h" + #include "vcn_v1_0.h" +@@ -34,6 +35,9 @@ + static void jpeg_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev); + static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev); + static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring); ++static int jpeg_v1_dec_ring_parse_cs(struct amdgpu_cs_parser *parser, ++ struct amdgpu_job *job, ++ struct amdgpu_ib *ib); + + static void jpeg_v1_0_decode_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val) + { +@@ -300,7 +304,10 @@ static void jpeg_v1_0_decode_ring_emit_ib(struct amdgpu_ring *ring, + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0)); +- amdgpu_ring_write(ring, (vmid | (vmid << 4))); ++ if (ring->funcs->parse_cs) ++ amdgpu_ring_write(ring, 0); ++ else ++ amdgpu_ring_write(ring, (vmid | (vmid << 4))); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0)); +@@ -554,6 +561,7 @@ static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = { + .get_rptr = jpeg_v1_0_decode_ring_get_rptr, + .get_wptr = jpeg_v1_0_decode_ring_get_wptr, + .set_wptr = jpeg_v1_0_decode_ring_set_wptr, ++ .parse_cs = jpeg_v1_dec_ring_parse_cs, + .emit_frame_size = + 6 + 6 + /* hdp invalidate / flush */ + SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + +@@ -612,3 
+620,69 @@ static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring) + + vcn_v1_0_set_pg_for_begin_use(ring, set_clocks); + } ++ ++/** ++ * jpeg_v1_dec_ring_parse_cs - command submission parser ++ * ++ * @parser: Command submission parser context ++ * @job: the job to parse ++ * @ib: the IB to parse ++ * ++ * Parse the command stream, return -EINVAL for invalid packet, ++ * 0 otherwise ++ */ ++static int jpeg_v1_dec_ring_parse_cs(struct amdgpu_cs_parser *parser, ++ struct amdgpu_job *job, ++ struct amdgpu_ib *ib) ++{ ++ u32 i, reg, res, cond, type; ++ int ret = 0; ++ struct amdgpu_device *adev = parser->adev; ++ ++ for (i = 0; i < ib->length_dw ; i += 2) { ++ reg = CP_PACKETJ_GET_REG(ib->ptr[i]); ++ res = CP_PACKETJ_GET_RES(ib->ptr[i]); ++ cond = CP_PACKETJ_GET_COND(ib->ptr[i]); ++ type = CP_PACKETJ_GET_TYPE(ib->ptr[i]); ++ ++ if (res || cond != PACKETJ_CONDITION_CHECK0) /* only allow 0 for now */ ++ return -EINVAL; ++ ++ if (reg >= JPEG_V1_REG_RANGE_START && reg <= JPEG_V1_REG_RANGE_END) ++ continue; ++ ++ switch (type) { ++ case PACKETJ_TYPE0: ++ if (reg != JPEG_V1_LMI_JPEG_WRITE_64BIT_BAR_HIGH && ++ reg != JPEG_V1_LMI_JPEG_WRITE_64BIT_BAR_LOW && ++ reg != JPEG_V1_LMI_JPEG_READ_64BIT_BAR_HIGH && ++ reg != JPEG_V1_LMI_JPEG_READ_64BIT_BAR_LOW && ++ reg != JPEG_V1_REG_CTX_INDEX && ++ reg != JPEG_V1_REG_CTX_DATA) { ++ ret = -EINVAL; ++ } ++ break; ++ case PACKETJ_TYPE1: ++ if (reg != JPEG_V1_REG_CTX_DATA) ++ ret = -EINVAL; ++ break; ++ case PACKETJ_TYPE3: ++ if (reg != JPEG_V1_REG_SOFT_RESET) ++ ret = -EINVAL; ++ break; ++ case PACKETJ_TYPE6: ++ if (ib->ptr[i] != CP_PACKETJ_NOP) ++ ret = -EINVAL; ++ break; ++ default: ++ ret = -EINVAL; ++ } ++ ++ if (ret) { ++ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]); ++ break; ++ } ++ } ++ ++ return ret; ++} +diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h +index bbf33a6a397298..9654d22e03763c 100644 +--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h ++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h +@@ -29,4 +29,15 @@ int jpeg_v1_0_sw_init(void *handle); + void jpeg_v1_0_sw_fini(void *handle); + void jpeg_v1_0_start(struct amdgpu_device *adev, int mode); + ++#define JPEG_V1_REG_RANGE_START 0x8000 ++#define JPEG_V1_REG_RANGE_END 0x803f ++ ++#define JPEG_V1_LMI_JPEG_WRITE_64BIT_BAR_HIGH 0x8238 ++#define JPEG_V1_LMI_JPEG_WRITE_64BIT_BAR_LOW 0x8239 ++#define JPEG_V1_LMI_JPEG_READ_64BIT_BAR_HIGH 0x825a ++#define JPEG_V1_LMI_JPEG_READ_64BIT_BAR_LOW 0x825b ++#define JPEG_V1_REG_CTX_INDEX 0x8328 ++#define JPEG_V1_REG_CTX_DATA 0x8329 ++#define JPEG_V1_REG_SOFT_RESET 0x83a0 ++ + #endif /*__JPEG_V1_0_H__*/ +diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c +index 0050e0a06cbc24..9bde0c8bf914a6 100644 +--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c ++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c +@@ -143,32 +143,25 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource + + link_enc = link_enc_cfg_get_link_enc(link); + ASSERT(link_enc); ++ if (link_enc->funcs->fec_set_ready == NULL) ++ return DC_NOT_SUPPORTED; + +- if (!dp_should_enable_fec(link)) +- return status; +- +- if (link_enc->funcs->fec_set_ready && +- link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) { +- if (ready) { +- fec_config = 1; +- status = core_link_write_dpcd(link, +- DP_FEC_CONFIGURATION, +- &fec_config, +- sizeof(fec_config)); +- if (status == DC_OK) { +- link_enc->funcs->fec_set_ready(link_enc, 
true); +- link->fec_state = dc_link_fec_ready; +- } else { +- link_enc->funcs->fec_set_ready(link_enc, false); +- link->fec_state = dc_link_fec_not_ready; +- dm_error("dpcd write failed to set fec_ready"); +- } +- } else if (link->fec_state == dc_link_fec_ready) { ++ if (ready && dp_should_enable_fec(link)) { ++ fec_config = 1; ++ ++ status = core_link_write_dpcd(link, DP_FEC_CONFIGURATION, ++ &fec_config, sizeof(fec_config)); ++ ++ if (status == DC_OK) { ++ link_enc->funcs->fec_set_ready(link_enc, true); ++ link->fec_state = dc_link_fec_ready; ++ } ++ } else { ++ if (link->fec_state == dc_link_fec_ready) { + fec_config = 0; +- status = core_link_write_dpcd(link, +- DP_FEC_CONFIGURATION, +- &fec_config, +- sizeof(fec_config)); ++ core_link_write_dpcd(link, DP_FEC_CONFIGURATION, ++ &fec_config, sizeof(fec_config)); ++ + link_enc->funcs->fec_set_ready(link_enc, false); + link->fec_state = dc_link_fec_not_ready; + } +@@ -183,14 +176,12 @@ void dp_set_fec_enable(struct dc_link *link, bool enable) + + link_enc = link_enc_cfg_get_link_enc(link); + ASSERT(link_enc); +- +- if (!dp_should_enable_fec(link)) ++ if (link_enc->funcs->fec_set_enable == NULL) + return; + +- if (link_enc->funcs->fec_set_enable && +- link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) { +- if (link->fec_state == dc_link_fec_ready && enable) { +- /* Accord to DP spec, FEC enable sequence can first ++ if (enable && dp_should_enable_fec(link)) { ++ if (link->fec_state == dc_link_fec_ready) { ++ /* According to DP spec, FEC enable sequence can first + * be transmitted anytime after 1000 LL codes have + * been transmitted on the link after link training + * completion. Using 1 lane RBR should have the maximum +@@ -200,7 +191,9 @@ void dp_set_fec_enable(struct dc_link *link, bool enable) + udelay(7); + link_enc->funcs->fec_set_enable(link_enc, true); + link->fec_state = dc_link_fec_enabled; +- } else if (link->fec_state == dc_link_fec_enabled && !enable) { ++ } ++ } else { ++ if (link->fec_state == dc_link_fec_enabled) { + link_enc->funcs->fec_set_enable(link_enc, false); + link->fec_state = dc_link_fec_ready; + } +diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h +index 6c8b4afa7cc6eb..ccc79bdd4f5adf 100644 +--- a/drivers/gpu/drm/amd/include/atomfirmware.h ++++ b/drivers/gpu/drm/amd/include/atomfirmware.h +@@ -1006,7 +1006,7 @@ struct display_object_info_table_v1_4 + uint16_t supporteddevices; + uint8_t number_of_path; + uint8_t reserved; +- struct atom_display_object_path_v2 display_path[8]; //the real number of this included in the structure is calculated by using the (whole structure size - the header size- number_of_path)/size of atom_display_object_path ++ struct atom_display_object_path_v2 display_path[]; //the real number of this included in the structure is calculated by using the (whole structure size - the header size- number_of_path)/size of atom_display_object_path + }; + + struct display_object_info_table_v1_5 { +@@ -1016,7 +1016,7 @@ struct display_object_info_table_v1_5 { + uint8_t reserved; + // the real number of this included in the structure is calculated by using the + // (whole structure size - the header size- number_of_path)/size of atom_display_object_path +- struct atom_display_object_path_v3 display_path[8]; ++ struct atom_display_object_path_v3 display_path[]; + }; + + /* +diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c +index 039da0d1a613b7..5b2506c65e9520 100644 +--- 
a/drivers/gpu/drm/drm_panel_orientation_quirks.c ++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c +@@ -208,6 +208,18 @@ static const struct dmi_system_id orientation_data[] = { + DMI_MATCH(DMI_BOARD_NAME, "KUN"), + }, + .driver_data = (void *)&lcd1600x2560_rightside_up, ++ }, { /* AYN Loki Max */ ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ayn"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Loki Max"), ++ }, ++ .driver_data = (void *)&lcd1080x1920_leftside_up, ++ }, { /* AYN Loki Zero */ ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ayn"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Loki Zero"), ++ }, ++ .driver_data = (void *)&lcd1080x1920_leftside_up, + }, { /* Chuwi HiBook (CWI514) */ + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"), +diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c +index 5860428da8de87..7b4ed5ca0a9bd2 100644 +--- a/drivers/gpu/drm/drm_syncobj.c ++++ b/drivers/gpu/drm/drm_syncobj.c +@@ -1421,6 +1421,7 @@ drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data, + struct drm_syncobj *syncobj; + struct eventfd_ctx *ev_fd_ctx; + struct syncobj_eventfd_entry *entry; ++ int ret; + + if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE)) + return -EOPNOTSUPP; +@@ -1436,13 +1437,15 @@ drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data, + return -ENOENT; + + ev_fd_ctx = eventfd_ctx_fdget(args->fd); +- if (IS_ERR(ev_fd_ctx)) +- return PTR_ERR(ev_fd_ctx); ++ if (IS_ERR(ev_fd_ctx)) { ++ ret = PTR_ERR(ev_fd_ctx); ++ goto err_fdget; ++ } + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { +- eventfd_ctx_put(ev_fd_ctx); +- return -ENOMEM; ++ ret = -ENOMEM; ++ goto err_kzalloc; + } + entry->syncobj = syncobj; + entry->ev_fd_ctx = ev_fd_ctx; +@@ -1453,6 +1456,12 @@ drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data, + drm_syncobj_put(syncobj); + + return 0; ++ ++err_kzalloc: ++ eventfd_ctx_put(ev_fd_ctx); ++err_fdget: ++ drm_syncobj_put(syncobj); ++ return ret; + } + + int +diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +index b5de5a9f596715..236dfff81fea43 100644 +--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c ++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +@@ -2695,9 +2695,9 @@ static void prepare_context_registration_info_v70(struct intel_context *ce, + ce->parallel.guc.wqi_tail = 0; + ce->parallel.guc.wqi_head = 0; + +- wq_desc_offset = i915_ggtt_offset(ce->state) + ++ wq_desc_offset = (u64)i915_ggtt_offset(ce->state) + + __get_parent_scratch_offset(ce); +- wq_base_offset = i915_ggtt_offset(ce->state) + ++ wq_base_offset = (u64)i915_ggtt_offset(ce->state) + + __get_wq_offset(ce); + info->wq_desc_lo = lower_32_bits(wq_desc_offset); + info->wq_desc_hi = upper_32_bits(wq_desc_offset); +diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c +index 8090dde0328082..96deaf85c0cd27 100644 +--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c ++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c +@@ -99,7 +99,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname, + * was a bad idea, and is only provided for backwards + * compatibility for older targets. 
+ */ +- return -ENODEV; ++ return -ENOENT; + } + + if (IS_ERR(fw)) { +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h +index 50f0c1914f58e8..4c3f7439657987 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h +@@ -46,6 +46,8 @@ u32 gm107_ram_probe_fbp(const struct nvkm_ram_func *, + u32 gm200_ram_probe_fbp_amount(const struct nvkm_ram_func *, u32, + struct nvkm_device *, int, int *); + ++int gp100_ram_init(struct nvkm_ram *); ++ + /* RAM type-specific MR calculation routines */ + int nvkm_sddr2_calc(struct nvkm_ram *); + int nvkm_sddr3_calc(struct nvkm_ram *); +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c +index 378f6fb7099077..8987a21e81d174 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c +@@ -27,7 +27,7 @@ + #include + #include + +-static int ++int + gp100_ram_init(struct nvkm_ram *ram) + { + struct nvkm_subdev *subdev = &ram->fb->subdev; +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c +index 8550f5e473474b..b6b6ee59019d70 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c +@@ -5,6 +5,7 @@ + + static const struct nvkm_ram_func + gp102_ram = { ++ .init = gp100_ram_init, + }; + + int +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index 4246348ca16e99..a5987fafbedde4 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -515,6 +515,8 @@ + #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100 + + #define I2C_VENDOR_ID_GOODIX 0x27c6 ++#define I2C_DEVICE_ID_GOODIX_01E8 0x01e8 ++#define I2C_DEVICE_ID_GOODIX_01E9 0x01e9 + #define I2C_DEVICE_ID_GOODIX_01F0 0x01f0 + + #define USB_VENDOR_ID_GOODTOUCH 0x1aad +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c +index 17efe6e2a1a44c..8ef41d6e71d421 100644 +--- a/drivers/hid/hid-multitouch.c ++++ b/drivers/hid/hid-multitouch.c +@@ -1442,6 +1442,30 @@ static int mt_event(struct hid_device *hid, struct hid_field *field, + return 0; + } + ++static __u8 *mt_report_fixup(struct hid_device *hdev, __u8 *rdesc, ++ unsigned int *size) ++{ ++ if (hdev->vendor == I2C_VENDOR_ID_GOODIX && ++ (hdev->product == I2C_DEVICE_ID_GOODIX_01E8 || ++ hdev->product == I2C_DEVICE_ID_GOODIX_01E9)) { ++ if (rdesc[607] == 0x15) { ++ rdesc[607] = 0x25; ++ dev_info( ++ &hdev->dev, ++ "GT7868Q report descriptor fixup is applied.\n"); ++ } else { ++ dev_info( ++ &hdev->dev, ++ "The byte is not expected for fixing the report descriptor. \ ++It's possible that the touchpad firmware is not suitable for applying the fix. 
\ ++got: %x\n", ++ rdesc[607]); ++ } ++ } ++ ++ return rdesc; ++} ++ + static void mt_report(struct hid_device *hid, struct hid_report *report) + { + struct mt_device *td = hid_get_drvdata(hid); +@@ -2038,6 +2062,14 @@ static const struct hid_device_id mt_devices[] = { + MT_BT_DEVICE(USB_VENDOR_ID_FRUCTEL, + USB_DEVICE_ID_GAMETEL_MT_MODE) }, + ++ /* Goodix GT7868Q devices */ ++ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU, ++ HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX, ++ I2C_DEVICE_ID_GOODIX_01E8) }, ++ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU, ++ HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX, ++ I2C_DEVICE_ID_GOODIX_01E8) }, ++ + /* GoodTouch panels */ + { .driver_data = MT_CLS_NSMU, + MT_USB_DEVICE(USB_VENDOR_ID_GOODTOUCH, +@@ -2273,6 +2305,7 @@ static struct hid_driver mt_driver = { + .feature_mapping = mt_feature_mapping, + .usage_table = mt_grabbed_usages, + .event = mt_event, ++ .report_fixup = mt_report_fixup, + .report = mt_report, + #ifdef CONFIG_PM + .suspend = mt_suspend, +diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h +index b0832a4c690d7f..76c2b364c3fe40 100644 +--- a/drivers/hwmon/pmbus/pmbus.h ++++ b/drivers/hwmon/pmbus/pmbus.h +@@ -409,6 +409,12 @@ enum pmbus_sensor_classes { + enum pmbus_data_format { linear = 0, ieee754, direct, vid }; + enum vrm_version { vr11 = 0, vr12, vr13, imvp9, amd625mv }; + ++/* PMBus revision identifiers */ ++#define PMBUS_REV_10 0x00 /* PMBus revision 1.0 */ ++#define PMBUS_REV_11 0x11 /* PMBus revision 1.1 */ ++#define PMBUS_REV_12 0x22 /* PMBus revision 1.2 */ ++#define PMBUS_REV_13 0x33 /* PMBus revision 1.3 */ ++ + struct pmbus_driver_info { + int pages; /* Total number of pages */ + u8 phases[PMBUS_PAGES]; /* Number of phases per page */ +diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c +index 1363d9f89181d2..728c07c42651ce 100644 +--- a/drivers/hwmon/pmbus/pmbus_core.c ++++ b/drivers/hwmon/pmbus/pmbus_core.c +@@ -85,6 +85,8 @@ struct pmbus_data { + + u32 flags; /* from platform data */ + ++ u8 revision; /* The PMBus revision the device is compliant with */ ++ + int exponent[PMBUS_PAGES]; + /* linear mode: exponent for output voltages */ + +@@ -1095,9 +1097,14 @@ static int pmbus_get_boolean(struct i2c_client *client, struct pmbus_boolean *b, + + regval = status & mask; + if (regval) { +- ret = _pmbus_write_byte_data(client, page, reg, regval); +- if (ret) +- goto unlock; ++ if (data->revision >= PMBUS_REV_12) { ++ ret = _pmbus_write_byte_data(client, page, reg, regval); ++ if (ret) ++ goto unlock; ++ } else { ++ pmbus_clear_fault_page(client, page); ++ } ++ + } + if (s1 && s2) { + s64 v1, v2; +@@ -2640,6 +2647,10 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data, + data->flags |= PMBUS_WRITE_PROTECTED | PMBUS_SKIP_STATUS_CHECK; + } + ++ ret = i2c_smbus_read_byte_data(client, PMBUS_REVISION); ++ if (ret >= 0) ++ data->revision = ret; ++ + if (data->info->pages) + pmbus_clear_faults(client); + else +diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c +index 2976c62b58c075..d2fe0269b6d3af 100644 +--- a/drivers/iio/adc/ad7124.c ++++ b/drivers/iio/adc/ad7124.c +@@ -14,7 +14,8 @@ + #include + #include + #include +-#include ++#include ++#include + #include + #include + +@@ -812,22 +813,19 @@ static int ad7124_check_chip_id(struct ad7124_state *st) + return 0; + } + +-static int ad7124_of_parse_channel_config(struct iio_dev *indio_dev, +- struct device_node *np) ++static int 
ad7124_parse_channel_config(struct iio_dev *indio_dev, ++ struct device *dev) + { + struct ad7124_state *st = iio_priv(indio_dev); + struct ad7124_channel_config *cfg; + struct ad7124_channel *channels; +- struct device_node *child; + struct iio_chan_spec *chan; + unsigned int ain[2], channel = 0, tmp; + int ret; + +- st->num_channels = of_get_available_child_count(np); +- if (!st->num_channels) { +- dev_err(indio_dev->dev.parent, "no channel children\n"); +- return -ENODEV; +- } ++ st->num_channels = device_get_child_node_count(dev); ++ if (!st->num_channels) ++ return dev_err_probe(dev, -ENODEV, "no channel children\n"); + + chan = devm_kcalloc(indio_dev->dev.parent, st->num_channels, + sizeof(*chan), GFP_KERNEL); +@@ -843,39 +841,37 @@ static int ad7124_of_parse_channel_config(struct iio_dev *indio_dev, + indio_dev->num_channels = st->num_channels; + st->channels = channels; + +- for_each_available_child_of_node(np, child) { +- cfg = &st->channels[channel].cfg; +- +- ret = of_property_read_u32(child, "reg", &channel); ++ device_for_each_child_node_scoped(dev, child) { ++ ret = fwnode_property_read_u32(child, "reg", &channel); + if (ret) +- goto err; ++ return ret; + +- if (channel >= indio_dev->num_channels) { +- dev_err(indio_dev->dev.parent, ++ if (channel >= indio_dev->num_channels) ++ return dev_err_probe(dev, -EINVAL, + "Channel index >= number of channels\n"); +- ret = -EINVAL; +- goto err; +- } + +- ret = of_property_read_u32_array(child, "diff-channels", +- ain, 2); ++ ret = fwnode_property_read_u32_array(child, "diff-channels", ++ ain, 2); + if (ret) +- goto err; ++ return ret; + + st->channels[channel].nr = channel; + st->channels[channel].ain = AD7124_CHANNEL_AINP(ain[0]) | + AD7124_CHANNEL_AINM(ain[1]); + +- cfg->bipolar = of_property_read_bool(child, "bipolar"); ++ cfg = &st->channels[channel].cfg; ++ cfg->bipolar = fwnode_property_read_bool(child, "bipolar"); + +- ret = of_property_read_u32(child, "adi,reference-select", &tmp); ++ ret = fwnode_property_read_u32(child, "adi,reference-select", &tmp); + if (ret) + cfg->refsel = AD7124_INT_REF; + else + cfg->refsel = tmp; + +- cfg->buf_positive = of_property_read_bool(child, "adi,buffered-positive"); +- cfg->buf_negative = of_property_read_bool(child, "adi,buffered-negative"); ++ cfg->buf_positive = ++ fwnode_property_read_bool(child, "adi,buffered-positive"); ++ cfg->buf_negative = ++ fwnode_property_read_bool(child, "adi,buffered-negative"); + + chan[channel] = ad7124_channel_template; + chan[channel].address = channel; +@@ -885,10 +881,6 @@ static int ad7124_of_parse_channel_config(struct iio_dev *indio_dev, + } + + return 0; +-err: +- of_node_put(child); +- +- return ret; + } + + static int ad7124_setup(struct ad7124_state *st) +@@ -948,9 +940,7 @@ static int ad7124_probe(struct spi_device *spi) + struct iio_dev *indio_dev; + int i, ret; + +- info = of_device_get_match_data(&spi->dev); +- if (!info) +- info = (void *)spi_get_device_id(spi)->driver_data; ++ info = spi_get_device_match_data(spi); + if (!info) + return -ENODEV; + +@@ -970,7 +960,7 @@ static int ad7124_probe(struct spi_device *spi) + if (ret < 0) + return ret; + +- ret = ad7124_of_parse_channel_config(indio_dev, spi->dev.of_node); ++ ret = ad7124_parse_channel_config(indio_dev, &spi->dev); + if (ret < 0) + return ret; + +diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c +index 45a497c0258b30..2d179bc56ce608 100644 +--- a/drivers/infiniband/hw/mlx5/main.c ++++ b/drivers/infiniband/hw/mlx5/main.c +@@ -444,7 +444,7 @@ static int 
translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed, + *active_width = IB_WIDTH_2X; + *active_speed = IB_SPEED_NDR; + break; +- case MLX5E_PROT_MASK(MLX5E_400GAUI_8): ++ case MLX5E_PROT_MASK(MLX5E_400GAUI_8_400GBASE_CR8): + *active_width = IB_WIDTH_8X; + *active_speed = IB_SPEED_HDR; + break; +diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c +index 7a303a9d6bf72b..cff3393f0dd000 100644 +--- a/drivers/input/mouse/synaptics.c ++++ b/drivers/input/mouse/synaptics.c +@@ -189,6 +189,7 @@ static const char * const smbus_pnp_ids[] = { + "LEN2054", /* E480 */ + "LEN2055", /* E580 */ + "LEN2068", /* T14 Gen 1 */ ++ "SYN3015", /* HP EliteBook 840 G2 */ + "SYN3052", /* HP EliteBook 840 G4 */ + "SYN3221", /* HP 15-ay000 */ + "SYN323d", /* HP Spectre X360 13-w013dx */ +diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h +index e9eb9554dd7bdc..bad238f69a7afd 100644 +--- a/drivers/input/serio/i8042-acpipnpio.h ++++ b/drivers/input/serio/i8042-acpipnpio.h +@@ -627,6 +627,15 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { + }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) + }, ++ { ++ /* Fujitsu Lifebook E756 */ ++ /* https://bugzilla.suse.com/show_bug.cgi?id=1229056 */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E756"), ++ }, ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX) ++ }, + { + /* Fujitsu Lifebook E5411 */ + .matches = { +diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c +index a66375700a630d..8b8c43b3c27f29 100644 +--- a/drivers/input/touchscreen/ads7846.c ++++ b/drivers/input/touchscreen/ads7846.c +@@ -808,7 +808,7 @@ static void ads7846_read_state(struct ads7846 *ts) + m = &ts->msg[msg_idx]; + error = spi_sync(ts->spi, m); + if (error) { +- dev_err(&ts->spi->dev, "spi_sync --> %d\n", error); ++ dev_err_ratelimited(&ts->spi->dev, "spi_sync --> %d\n", error); + packet->ignore = true; + return; + } +diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c +index 470add73f7bdac..a36dd749c688e1 100644 +--- a/drivers/md/dm-integrity.c ++++ b/drivers/md/dm-integrity.c +@@ -2183,6 +2183,7 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map + struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); + unsigned int journal_section, journal_entry; + unsigned int journal_read_pos; ++ sector_t recalc_sector; + struct completion read_comp; + bool discard_retried = false; + bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ; +@@ -2323,6 +2324,7 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map + goto lock_retry; + } + } ++ recalc_sector = le64_to_cpu(ic->sb->recalc_sector); + spin_unlock_irq(&ic->endio_wait.lock); + + if (unlikely(journal_read_pos != NOT_FOUND)) { +@@ -2377,7 +2379,7 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map + if (need_sync_io) { + wait_for_completion_io(&read_comp); + if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) && +- dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector)) ++ dio->range.logical_sector + dio->range.n_sectors > recalc_sector) + goto skip_check; + if (ic->mode == 'B') { + if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector, +diff --git a/drivers/misc/eeprom/digsy_mtc_eeprom.c b/drivers/misc/eeprom/digsy_mtc_eeprom.c +index f1f766b709657b..4eddc5ba1af9c8 100644 +--- 
a/drivers/misc/eeprom/digsy_mtc_eeprom.c ++++ b/drivers/misc/eeprom/digsy_mtc_eeprom.c +@@ -42,7 +42,7 @@ static void digsy_mtc_op_finish(void *p) + } + + struct eeprom_93xx46_platform_data digsy_mtc_eeprom_data = { +- .flags = EE_ADDR8, ++ .flags = EE_ADDR8 | EE_SIZE1K, + .prepare = digsy_mtc_op_prepare, + .finish = digsy_mtc_op_finish, + }; +diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c +index 3c5509e75a5486..afb5dae4439ce6 100644 +--- a/drivers/net/dsa/ocelot/felix_vsc9959.c ++++ b/drivers/net/dsa/ocelot/felix_vsc9959.c +@@ -1474,10 +1474,13 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port, + /* Hardware errata - Admin config could not be overwritten if + * config is pending, need reset the TAS module + */ +- val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8); +- if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING) { +- ret = -EBUSY; +- goto err_reset_tc; ++ val = ocelot_read_rix(ocelot, QSYS_TAG_CONFIG, port); ++ if (val & QSYS_TAG_CONFIG_ENABLE) { ++ val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8); ++ if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING) { ++ ret = -EBUSY; ++ goto err_reset_tc; ++ } + } + + ocelot_rmw_rix(ocelot, +diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h +index 63b3e02fab162e..4968f6f0bdbc25 100644 +--- a/drivers/net/ethernet/faraday/ftgmac100.h ++++ b/drivers/net/ethernet/faraday/ftgmac100.h +@@ -84,7 +84,7 @@ + FTGMAC100_INT_RPKT_BUF) + + /* All the interrupts we care about */ +-#define FTGMAC100_INT_ALL (FTGMAC100_INT_RPKT_BUF | \ ++#define FTGMAC100_INT_ALL (FTGMAC100_INT_RXTX | \ + FTGMAC100_INT_BAD) + + /* +diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +index c6a3eefd83bff9..e7bf70ac9a4ca5 100644 +--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c ++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +@@ -2285,12 +2285,12 @@ static netdev_tx_t + dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) + { + const int queue_mapping = skb_get_queue_mapping(skb); +- bool nonlinear = skb_is_nonlinear(skb); + struct rtnl_link_stats64 *percpu_stats; + struct dpaa_percpu_priv *percpu_priv; + struct netdev_queue *txq; + struct dpaa_priv *priv; + struct qm_fd fd; ++ bool nonlinear; + int offset = 0; + int err = 0; + +@@ -2300,6 +2300,13 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) + + qm_fd_clear_fd(&fd); + ++ /* Packet data is always read as 32-bit words, so zero out any part of ++ * the skb which might be sent if we have to pad the packet ++ */ ++ if (__skb_put_padto(skb, ETH_ZLEN, false)) ++ goto enomem; ++ ++ nonlinear = skb_is_nonlinear(skb); + if (!nonlinear) { + /* We're going to store the skb backpointer at the beginning + * of the data buffer, so we need a privately owned skb +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +index 92c592c177e67a..9650ce594e2fdd 100644 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +@@ -11370,7 +11370,7 @@ static void hclge_pci_uninit(struct hclge_dev *hdev) + + pcim_iounmap(pdev, hdev->hw.hw.io_base); + pci_free_irq_vectors(pdev); +- pci_release_mem_regions(pdev); ++ pci_release_regions(pdev); + pci_disable_device(pdev); + } + +diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c +index b3010a53f1b457..3a0ef56d3edcac 
100644 +--- a/drivers/net/ethernet/intel/ice/ice_lib.c ++++ b/drivers/net/ethernet/intel/ice/ice_lib.c +@@ -2600,13 +2600,6 @@ void ice_vsi_decfg(struct ice_vsi *vsi) + struct ice_pf *pf = vsi->back; + int err; + +- /* The Rx rule will only exist to remove if the LLDP FW +- * engine is currently stopped +- */ +- if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF && +- !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) +- ice_cfg_sw_lldp(vsi, false, false); +- + ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); + err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); + if (err) +@@ -2953,6 +2946,14 @@ int ice_vsi_release(struct ice_vsi *vsi) + ice_rss_clean(vsi); + + ice_vsi_close(vsi); ++ ++ /* The Rx rule will only exist to remove if the LLDP FW ++ * engine is currently stopped ++ */ ++ if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF && ++ !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) ++ ice_cfg_sw_lldp(vsi, false, false); ++ + ice_vsi_decfg(vsi); + + /* retain SW VSI data structure since it is needed to unregister and +diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c +index 88ee2491312a55..355716e6bcc82d 100644 +--- a/drivers/net/ethernet/intel/ice/ice_switch.c ++++ b/drivers/net/ethernet/intel/ice/ice_switch.c +@@ -3072,7 +3072,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, + + /* A rule already exists with the new VSI being added */ + if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map)) +- return 0; ++ return -EEXIST; + + /* Update the previously created VSI list set with + * the new VSI ID passed in +@@ -3142,7 +3142,7 @@ ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle, + + list_head = &sw->recp_list[recp_id].filt_rules; + list_for_each_entry(list_itr, list_head, list_entry) { +- if (list_itr->vsi_list_info) { ++ if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) { + map_info = list_itr->vsi_list_info; + if (test_bit(vsi_handle, map_info->vsi_map)) { + *vsi_list_id = map_info->vsi_list_id; +diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c +index fa268d7bd1bc3c..986bcbf0a6abaf 100644 +--- a/drivers/net/ethernet/intel/igb/igb_main.c ++++ b/drivers/net/ethernet/intel/igb/igb_main.c +@@ -33,6 +33,7 @@ + #include + #include + #include ++#include + #ifdef CONFIG_IGB_DCA + #include + #endif +@@ -2939,8 +2940,11 @@ static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp) + } + } + ++/* This function assumes __netif_tx_lock is held by the caller. */ + static void igb_xdp_ring_update_tail(struct igb_ring *ring) + { ++ lockdep_assert_held(&txring_txq(ring)->_xmit_lock); ++ + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. 
+ */ +@@ -3025,11 +3029,11 @@ static int igb_xdp_xmit(struct net_device *dev, int n, + nxmit++; + } + +- __netif_tx_unlock(nq); +- + if (unlikely(flags & XDP_XMIT_FLUSH)) + igb_xdp_ring_update_tail(tx_ring); + ++ __netif_tx_unlock(nq); ++ + return nxmit; + } + +@@ -8889,12 +8893,14 @@ static void igb_put_rx_buffer(struct igb_ring *rx_ring, + + static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) + { ++ unsigned int total_bytes = 0, total_packets = 0; + struct igb_adapter *adapter = q_vector->adapter; + struct igb_ring *rx_ring = q_vector->rx.ring; +- struct sk_buff *skb = rx_ring->skb; +- unsigned int total_bytes = 0, total_packets = 0; + u16 cleaned_count = igb_desc_unused(rx_ring); ++ struct sk_buff *skb = rx_ring->skb; ++ int cpu = smp_processor_id(); + unsigned int xdp_xmit = 0; ++ struct netdev_queue *nq; + struct xdp_buff xdp; + u32 frame_sz = 0; + int rx_buf_pgcnt; +@@ -9022,7 +9028,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) + if (xdp_xmit & IGB_XDP_TX) { + struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter); + ++ nq = txring_txq(tx_ring); ++ __netif_tx_lock(nq, cpu); + igb_xdp_ring_update_tail(tx_ring); ++ __netif_tx_unlock(nq); + } + + u64_stats_update_begin(&rx_ring->rx_syncp); +diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c +index 1732ec3c3dbdc4..a718207988f2c4 100644 +--- a/drivers/net/ethernet/jme.c ++++ b/drivers/net/ethernet/jme.c +@@ -946,15 +946,13 @@ jme_udpsum(struct sk_buff *skb) + if (skb->protocol != htons(ETH_P_IP)) + return csum; + skb_set_network_header(skb, ETH_HLEN); +- if ((ip_hdr(skb)->protocol != IPPROTO_UDP) || +- (skb->len < (ETH_HLEN + +- (ip_hdr(skb)->ihl << 2) + +- sizeof(struct udphdr)))) { ++ ++ if (ip_hdr(skb)->protocol != IPPROTO_UDP || ++ skb->len < (ETH_HLEN + ip_hdrlen(skb) + sizeof(struct udphdr))) { + skb_reset_network_header(skb); + return csum; + } +- skb_set_transport_header(skb, +- ETH_HLEN + (ip_hdr(skb)->ihl << 2)); ++ skb_set_transport_header(skb, ETH_HLEN + ip_hdrlen(skb)); + csum = udp_hdr(skb)->check; + skb_reset_transport_header(skb); + skb_reset_network_header(skb); +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +index 185c296eaaf0d4..e81cfcaf9ce4fe 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +@@ -290,6 +290,7 @@ struct nix_mark_format { + + /* smq(flush) to tl1 cir/pir info */ + struct nix_smq_tree_ctx { ++ u16 schq; + u64 cir_off; + u64 cir_val; + u64 pir_off; +@@ -299,8 +300,6 @@ struct nix_smq_tree_ctx { + /* smq flush context */ + struct nix_smq_flush_ctx { + int smq; +- u16 tl1_schq; +- u16 tl2_schq; + struct nix_smq_tree_ctx smq_tree_ctx[NIX_TXSCH_LVL_CNT]; + }; + +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +index a07e5c8786c4b4..224a025283ca7d 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +@@ -2146,14 +2146,13 @@ static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq, + schq = smq; + for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) { + smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl]; ++ smq_tree_ctx->schq = schq; + if (lvl == NIX_TXSCH_LVL_TL1) { +- smq_flush_ctx->tl1_schq = schq; + smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq); + smq_tree_ctx->pir_off = 0; + smq_tree_ctx->pir_val = 0; + parent_off = 0; + } else 
if (lvl == NIX_TXSCH_LVL_TL2) { +- smq_flush_ctx->tl2_schq = schq; + smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq); + smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq); + parent_off = NIX_AF_TL2X_PARENT(schq); +@@ -2188,8 +2187,8 @@ static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr, + { + struct nix_txsch *txsch; + struct nix_hw *nix_hw; ++ int tl2, tl2_schq; + u64 regoff; +- int tl2; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) +@@ -2197,16 +2196,17 @@ static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr, + + /* loop through all TL2s with matching PF_FUNC */ + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2]; ++ tl2_schq = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL2].schq; + for (tl2 = 0; tl2 < txsch->schq.max; tl2++) { + /* skip the smq(flush) TL2 */ +- if (tl2 == smq_flush_ctx->tl2_schq) ++ if (tl2 == tl2_schq) + continue; + /* skip unused TL2s */ + if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE) + continue; + /* skip if PF_FUNC doesn't match */ + if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) != +- (TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq] & ++ (TXSCH_MAP_FUNC(txsch->pfvf_map[tl2_schq] & + ~RVU_PFVF_FUNC_MASK))) + continue; + /* enable/disable XOFF */ +@@ -2248,10 +2248,12 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr, + int smq, u16 pcifunc, int nixlf) + { + struct nix_smq_flush_ctx *smq_flush_ctx; ++ int err, restore_tx_en = 0, i; + int pf = rvu_get_pf(pcifunc); + u8 cgx_id = 0, lmac_id = 0; +- int err, restore_tx_en = 0; +- u64 cfg; ++ u16 tl2_tl3_link_schq; ++ u8 link, link_level; ++ u64 cfg, bmap = 0; + + if (!is_rvu_otx2(rvu)) { + /* Skip SMQ flush if pkt count is zero */ +@@ -2275,16 +2277,38 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr, + nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true); + nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false); + +- cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq)); +- /* Do SMQ flush and set enqueue xoff */ +- cfg |= BIT_ULL(50) | BIT_ULL(49); +- rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg); +- + /* Disable backpressure from physical link, + * otherwise SMQ flush may stall. + */ + rvu_cgx_enadis_rx_bp(rvu, pf, false); + ++ link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? 
++ NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; ++ tl2_tl3_link_schq = smq_flush_ctx->smq_tree_ctx[link_level].schq; ++ link = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL1].schq; ++ ++ /* SMQ set enqueue xoff */ ++ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq)); ++ cfg |= BIT_ULL(50); ++ rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg); ++ ++ /* Clear all NIX_AF_TL3_TL2_LINK_CFG[ENA] for the TL3/TL2 queue */ ++ for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) { ++ cfg = rvu_read64(rvu, blkaddr, ++ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link)); ++ if (!(cfg & BIT_ULL(12))) ++ continue; ++ bmap |= (1 << i); ++ cfg &= ~BIT_ULL(12); ++ rvu_write64(rvu, blkaddr, ++ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg); ++ } ++ ++ /* Do SMQ flush and set enqueue xoff */ ++ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq)); ++ cfg |= BIT_ULL(50) | BIT_ULL(49); ++ rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg); ++ + /* Wait for flush to complete */ + err = rvu_poll_reg(rvu, blkaddr, + NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true); +@@ -2293,6 +2317,17 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr, + "NIXLF%d: SMQ%d flush failed, txlink might be busy\n", + nixlf, smq); + ++ /* Set NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */ ++ for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) { ++ if (!(bmap & (1 << i))) ++ continue; ++ cfg = rvu_read64(rvu, blkaddr, ++ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link)); ++ cfg |= BIT_ULL(12); ++ rvu_write64(rvu, blkaddr, ++ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg); ++ } ++ + /* clear XOFF on TL2s */ + nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true); + nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +index 50db127e6371bb..54379297a7489e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +@@ -136,6 +136,10 @@ void mlx5e_build_ptys2ethtool_map(void) + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, legacy, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT); ++ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100BASE_TX, legacy, ++ ETHTOOL_LINK_MODE_100baseT_Full_BIT); ++ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_T, legacy, ++ ETHTOOL_LINK_MODE_1000baseT_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, legacy, + ETHTOOL_LINK_MODE_10000baseT_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, legacy, +@@ -201,6 +205,12 @@ void mlx5e_build_ptys2ethtool_map(void) + ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT); ++ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_400GAUI_8_400GBASE_CR8, ext, ++ ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT, ++ ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT, ++ ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT, ++ ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT, ++ ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GAUI_1_100GBASE_CR_KR, ext, + ETHTOOL_LINK_MODE_100000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR_Full_BIT, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c +index 255bc8b749f9a5..8587cd572da536 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c ++++ 
b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c +@@ -319,7 +319,7 @@ int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting) + return -EPERM; + + mutex_lock(&esw->state_lock); +- if (esw->mode != MLX5_ESWITCH_LEGACY) { ++ if (esw->mode != MLX5_ESWITCH_LEGACY || !mlx5_esw_is_fdb_created(esw)) { + err = -EOPNOTSUPP; + goto out; + } +@@ -339,7 +339,7 @@ int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting) + if (!mlx5_esw_allowed(esw)) + return -EPERM; + +- if (esw->mode != MLX5_ESWITCH_LEGACY) ++ if (esw->mode != MLX5_ESWITCH_LEGACY || !mlx5_esw_is_fdb_created(esw)) + return -EOPNOTSUPP; + + *setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +index 1887a24ee414d0..cc0f2be21a265a 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +@@ -311,6 +311,25 @@ static int esw_qos_set_group_max_rate(struct mlx5_eswitch *esw, + return err; + } + ++static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type) ++{ ++ switch (type) { ++ case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR: ++ return MLX5_CAP_QOS(dev, esw_element_type) & ++ ELEMENT_TYPE_CAP_MASK_TSAR; ++ case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT: ++ return MLX5_CAP_QOS(dev, esw_element_type) & ++ ELEMENT_TYPE_CAP_MASK_VPORT; ++ case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC: ++ return MLX5_CAP_QOS(dev, esw_element_type) & ++ ELEMENT_TYPE_CAP_MASK_VPORT_TC; ++ case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC: ++ return MLX5_CAP_QOS(dev, esw_element_type) & ++ ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC; ++ } ++ return false; ++} ++ + static int esw_qos_vport_create_sched_element(struct mlx5_eswitch *esw, + struct mlx5_vport *vport, + u32 max_rate, u32 bw_share) +@@ -322,6 +341,9 @@ static int esw_qos_vport_create_sched_element(struct mlx5_eswitch *esw, + void *vport_elem; + int err; + ++ if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT)) ++ return -EOPNOTSUPP; ++ + parent_tsar_ix = group ? 
group->tsar_ix : esw->qos.root_tsar_ix; + MLX5_SET(scheduling_context, sched_ctx, element_type, + SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT); +@@ -420,6 +442,7 @@ __esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *ex + { + u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; + struct mlx5_esw_rate_group *group; ++ __be32 *attr; + u32 divider; + int err; + +@@ -427,6 +450,12 @@ __esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *ex + if (!group) + return ERR_PTR(-ENOMEM); + ++ MLX5_SET(scheduling_context, tsar_ctx, element_type, ++ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR); ++ ++ attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes); ++ *attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16); ++ + MLX5_SET(scheduling_context, tsar_ctx, parent_element_id, + esw->qos.root_tsar_ix); + err = mlx5_create_scheduling_element_cmd(esw->dev, +@@ -525,25 +554,6 @@ static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw, + return err; + } + +-static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type) +-{ +- switch (type) { +- case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR: +- return MLX5_CAP_QOS(dev, esw_element_type) & +- ELEMENT_TYPE_CAP_MASK_TASR; +- case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT: +- return MLX5_CAP_QOS(dev, esw_element_type) & +- ELEMENT_TYPE_CAP_MASK_VPORT; +- case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC: +- return MLX5_CAP_QOS(dev, esw_element_type) & +- ELEMENT_TYPE_CAP_MASK_VPORT_TC; +- case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC: +- return MLX5_CAP_QOS(dev, esw_element_type) & +- ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC; +- } +- return false; +-} +- + static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack) + { + u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; +@@ -554,7 +564,8 @@ static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta + if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling)) + return -EOPNOTSUPP; + +- if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR)) ++ if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR) || ++ !(MLX5_CAP_QOS(dev, esw_tsar_type) & TSAR_TYPE_CAP_MASK_DWRR)) + return -EOPNOTSUPP; + + MLX5_SET(scheduling_context, tsar_ctx, element_type, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c +index 11f11248feb8b7..96136229b1b070 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c +@@ -2205,6 +2205,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = { + { PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */ + { PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */ + { PCI_VDEVICE(MELLANOX, 0x1023) }, /* ConnectX-8 */ ++ { PCI_VDEVICE(MELLANOX, 0x1025) }, /* ConnectX-9 */ + { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */ + { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */ + { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */ +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c +index be70d1f23a5da3..749f0fc2c189ad 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c +@@ -1098,7 +1098,7 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] 
= { + [MLX5E_CAUI_4_100GBASE_CR4_KR4] = 100000, + [MLX5E_100GAUI_2_100GBASE_CR2_KR2] = 100000, + [MLX5E_200GAUI_4_200GBASE_CR4_KR4] = 200000, +- [MLX5E_400GAUI_8] = 400000, ++ [MLX5E_400GAUI_8_400GBASE_CR8] = 400000, + [MLX5E_100GAUI_1_100GBASE_CR_KR] = 100000, + [MLX5E_200GAUI_2_200GBASE_CR2_KR2] = 200000, + [MLX5E_400GAUI_4_400GBASE_CR4_KR4] = 400000, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/qos.c +index 8bce730b5c5bef..db2bd3ad63ba36 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/qos.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/qos.c +@@ -28,6 +28,9 @@ int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id, + { + u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; + ++ if (!(MLX5_CAP_QOS(mdev, nic_element_type) & ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP)) ++ return -EOPNOTSUPP; ++ + MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id); + MLX5_SET(scheduling_context, sched_ctx, element_type, + SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP); +@@ -44,6 +47,10 @@ int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id, + u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; + void *attr; + ++ if (!(MLX5_CAP_QOS(mdev, nic_element_type) & ELEMENT_TYPE_CAP_MASK_TSAR) || ++ !(MLX5_CAP_QOS(mdev, nic_tsar_type) & TSAR_TYPE_CAP_MASK_DWRR)) ++ return -EOPNOTSUPP; ++ + MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id); + MLX5_SET(scheduling_context, sched_ctx, element_type, + SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR); +diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h +index f09f10f17d7eaa..2facbdfbb319e7 100644 +--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h ++++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h +@@ -419,6 +419,8 @@ struct axidma_bd { + * @tx_bytes: TX byte count for statistics + * @tx_stat_sync: Synchronization object for TX stats + * @dma_err_task: Work structure to process Axi DMA errors ++ * @stopping: Set when @dma_err_task shouldn't do anything because we are ++ * about to stop the device. 
+ * @tx_irq: Axidma TX IRQ number + * @rx_irq: Axidma RX IRQ number + * @eth_irq: Ethernet core IRQ number +@@ -481,6 +483,7 @@ struct axienet_local { + struct u64_stats_sync tx_stat_sync; + + struct work_struct dma_err_task; ++ bool stopping; + + int tx_irq; + int rx_irq; +diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +index 144feb7a2fdac6..65d7aaad43fe90 100644 +--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c ++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +@@ -1162,6 +1162,7 @@ static int axienet_open(struct net_device *ndev) + phylink_start(lp->phylink); + + /* Enable worker thread for Axi DMA error handling */ ++ lp->stopping = false; + INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler); + + napi_enable(&lp->napi_rx); +@@ -1217,6 +1218,9 @@ static int axienet_stop(struct net_device *ndev) + + dev_dbg(&ndev->dev, "axienet_close()\n"); + ++ WRITE_ONCE(lp->stopping, true); ++ flush_work(&lp->dma_err_task); ++ + napi_disable(&lp->napi_tx); + napi_disable(&lp->napi_rx); + +@@ -1761,6 +1765,10 @@ static void axienet_dma_err_handler(struct work_struct *work) + dma_err_task); + struct net_device *ndev = lp->ndev; + ++ /* Don't bother if we are going to stop anyway */ ++ if (READ_ONCE(lp->stopping)) ++ return; ++ + napi_disable(&lp->napi_tx); + napi_disable(&lp->napi_rx); + +diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c +index 897b979ec03c81..3b5fcaf0dd36db 100644 +--- a/drivers/net/phy/vitesse.c ++++ b/drivers/net/phy/vitesse.c +@@ -237,16 +237,6 @@ static int vsc739x_config_init(struct phy_device *phydev) + return 0; + } + +-static int vsc73xx_config_aneg(struct phy_device *phydev) +-{ +- /* The VSC73xx switches does not like to be instructed to +- * do autonegotiation in any way, it prefers that you just go +- * with the power-on/reset defaults. Writing some registers will +- * just make autonegotiation permanently fail. +- */ +- return 0; +-} +- + /* This adds a skew for both TX and RX clocks, so the skew should only be + * applied to "rgmii-id" interfaces. It may not work as expected + * on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces. 
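[Editorial aside, not part of the upstream diff: the xilinx_axienet hunks earlier in this patch close a shutdown race. axienet_stop() now publishes lp->stopping and flushes dma_err_task before tearing the device down, and the error worker returns early once it observes the flag. Below is a minimal, runnable userspace analogue of that pattern, with pthreads standing in for the kernel workqueue API; every name in it is invented for illustration.]

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_bool stopping;		/* plays the role of lp->stopping */

/* Stand-in for axienet_dma_err_handler(): do nothing once stop has begun. */
static void *dma_err_worker(void *arg)
{
	(void)arg;
	if (atomic_load(&stopping))	/* READ_ONCE(lp->stopping) */
		return NULL;
	puts("recovering DMA");		/* reset/restart work would go here */
	return NULL;
}

int main(void)
{
	pthread_t worker;

	if (pthread_create(&worker, NULL, dma_err_worker, NULL))
		return 1;

	/* The stop path: publish the flag first, then wait the worker out. */
	atomic_store(&stopping, true);	/* WRITE_ONCE(lp->stopping, true) */
	pthread_join(worker, NULL);	/* flush_work(&lp->dma_err_task) */

	puts("stopped; no error work runs past this point");
	return 0;
}

[The ordering carries the guarantee: because the flag is set before the wait, a worker instance either observes it and bails out, or finishes before the wait returns; either way nothing touches the hardware after the stop path completes.]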
+@@ -444,7 +434,6 @@ static struct phy_driver vsc82xx_driver[] = { + .phy_id_mask = 0x000ffff0, + /* PHY_GBIT_FEATURES */ + .config_init = vsc738x_config_init, +- .config_aneg = vsc73xx_config_aneg, + .read_page = vsc73xx_read_page, + .write_page = vsc73xx_write_page, + }, { +@@ -453,7 +442,6 @@ static struct phy_driver vsc82xx_driver[] = { + .phy_id_mask = 0x000ffff0, + /* PHY_GBIT_FEATURES */ + .config_init = vsc738x_config_init, +- .config_aneg = vsc73xx_config_aneg, + .read_page = vsc73xx_read_page, + .write_page = vsc73xx_write_page, + }, { +@@ -462,7 +450,6 @@ static struct phy_driver vsc82xx_driver[] = { + .phy_id_mask = 0x000ffff0, + /* PHY_GBIT_FEATURES */ + .config_init = vsc739x_config_init, +- .config_aneg = vsc73xx_config_aneg, + .read_page = vsc73xx_read_page, + .write_page = vsc73xx_write_page, + }, { +@@ -471,7 +458,6 @@ static struct phy_driver vsc82xx_driver[] = { + .phy_id_mask = 0x000ffff0, + /* PHY_GBIT_FEATURES */ + .config_init = vsc739x_config_init, +- .config_aneg = vsc73xx_config_aneg, + .read_page = vsc73xx_read_page, + .write_page = vsc73xx_write_page, + }, { +diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c +index 6eeef10edadad1..46afb95ffabe3b 100644 +--- a/drivers/net/usb/ipheth.c ++++ b/drivers/net/usb/ipheth.c +@@ -286,10 +286,11 @@ static void ipheth_rcvbulk_callback(struct urb *urb) + return; + } + +- if (urb->actual_length <= IPHETH_IP_ALIGN) { +- dev->net->stats.rx_length_errors++; +- return; +- } ++ /* iPhone may periodically send URBs with no payload ++ * on the "bulk in" endpoint. It is safe to ignore them. ++ */ ++ if (urb->actual_length == 0) ++ goto rx_submit; + + /* RX URBs starting with 0x00 0x01 do not encapsulate Ethernet frames, + * but rather are control frames. Their purpose is not documented, and +@@ -298,7 +299,8 @@ static void ipheth_rcvbulk_callback(struct urb *urb) + * URB received from the bulk IN endpoint. 
+ */ + if (unlikely +- (((char *)urb->transfer_buffer)[0] == 0 && ++ (urb->actual_length == 4 && ++ ((char *)urb->transfer_buffer)[0] == 0 && + ((char *)urb->transfer_buffer)[1] == 1)) + goto rx_submit; + +@@ -306,7 +308,6 @@ static void ipheth_rcvbulk_callback(struct urb *urb) + if (retval != 0) { + dev_err(&dev->intf->dev, "%s: callback retval: %d\n", + __func__, retval); +- return; + } + + rx_submit: +@@ -354,13 +355,14 @@ static int ipheth_carrier_set(struct ipheth_device *dev) + 0x02, /* index */ + dev->ctrl_buf, IPHETH_CTRL_BUF_SIZE, + IPHETH_CTRL_TIMEOUT); +- if (retval < 0) { ++ if (retval <= 0) { + dev_err(&dev->intf->dev, "%s: usb_control_msg: %d\n", + __func__, retval); + return retval; + } + +- if (dev->ctrl_buf[0] == IPHETH_CARRIER_ON) { ++ if ((retval == 1 && dev->ctrl_buf[0] == IPHETH_CARRIER_ON) || ++ (retval >= 2 && dev->ctrl_buf[1] == IPHETH_CARRIER_ON)) { + netif_carrier_on(dev->net); + if (dev->tx_urb->status != -EINPROGRESS) + netif_wake_queue(dev->net); +diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c +index 6a5c2cae087d03..6dec54431312ad 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c ++++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c +@@ -1095,7 +1095,7 @@ static void mt7921_ipv6_addr_change(struct ieee80211_hw *hw, + struct inet6_dev *idev) + { + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; +- struct mt792x_dev *dev = mvif->phy->dev; ++ struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct inet6_ifaddr *ifa; + struct in6_addr ns_addrs[IEEE80211_BSS_ARP_ADDR_LIST_LEN]; + struct sk_buff *skb; +diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c +index e7fd1315d7edc2..f28c005c2bb265 100644 +--- a/drivers/nvmem/core.c ++++ b/drivers/nvmem/core.c +@@ -2131,6 +2131,19 @@ const char *nvmem_dev_name(struct nvmem_device *nvmem) + } + EXPORT_SYMBOL_GPL(nvmem_dev_name); + ++/** ++ * nvmem_dev_size() - Get the size of a given nvmem device. ++ * ++ * @nvmem: nvmem device. ++ * ++ * Return: size of the nvmem device. 
++ */ ++size_t nvmem_dev_size(struct nvmem_device *nvmem) ++{ ++ return nvmem->size; ++} ++EXPORT_SYMBOL_GPL(nvmem_dev_size); ++ + static int __init nvmem_init(void) + { + return bus_register(&nvmem_bus_type); +diff --git a/drivers/nvmem/u-boot-env.c b/drivers/nvmem/u-boot-env.c +index c4ae94af4af78e..adabbfdad6fb6d 100644 +--- a/drivers/nvmem/u-boot-env.c ++++ b/drivers/nvmem/u-boot-env.c +@@ -23,13 +23,10 @@ enum u_boot_env_format { + + struct u_boot_env { + struct device *dev; ++ struct nvmem_device *nvmem; + enum u_boot_env_format format; + + struct mtd_info *mtd; +- +- /* Cells */ +- struct nvmem_cell_info *cells; +- int ncells; + }; + + struct u_boot_env_image_single { +@@ -94,70 +91,71 @@ static int u_boot_env_read_post_process_ethaddr(void *context, const char *id, i + static int u_boot_env_add_cells(struct u_boot_env *priv, uint8_t *buf, + size_t data_offset, size_t data_len) + { ++ struct nvmem_device *nvmem = priv->nvmem; + struct device *dev = priv->dev; + char *data = buf + data_offset; + char *var, *value, *eq; +- int idx; +- +- priv->ncells = 0; +- for (var = data; var < data + data_len && *var; var += strlen(var) + 1) +- priv->ncells++; + +- priv->cells = devm_kcalloc(dev, priv->ncells, sizeof(*priv->cells), GFP_KERNEL); +- if (!priv->cells) +- return -ENOMEM; +- +- for (var = data, idx = 0; ++ for (var = data; + var < data + data_len && *var; +- var = value + strlen(value) + 1, idx++) { ++ var = value + strlen(value) + 1) { ++ struct nvmem_cell_info info = {}; ++ + eq = strchr(var, '='); + if (!eq) + break; + *eq = '\0'; + value = eq + 1; + +- priv->cells[idx].name = devm_kstrdup(dev, var, GFP_KERNEL); +- if (!priv->cells[idx].name) ++ info.name = devm_kstrdup(dev, var, GFP_KERNEL); ++ if (!info.name) + return -ENOMEM; +- priv->cells[idx].offset = data_offset + value - data; +- priv->cells[idx].bytes = strlen(value); +- priv->cells[idx].np = of_get_child_by_name(dev->of_node, priv->cells[idx].name); ++ info.offset = data_offset + value - data; ++ info.bytes = strlen(value); ++ info.np = of_get_child_by_name(dev->of_node, info.name); + if (!strcmp(var, "ethaddr")) { +- priv->cells[idx].raw_len = strlen(value); +- priv->cells[idx].bytes = ETH_ALEN; +- priv->cells[idx].read_post_process = u_boot_env_read_post_process_ethaddr; ++ info.raw_len = strlen(value); ++ info.bytes = ETH_ALEN; ++ info.read_post_process = u_boot_env_read_post_process_ethaddr; + } +- } + +- if (WARN_ON(idx != priv->ncells)) +- priv->ncells = idx; ++ nvmem_add_one_cell(nvmem, &info); ++ } + + return 0; + } + + static int u_boot_env_parse(struct u_boot_env *priv) + { ++ struct nvmem_device *nvmem = priv->nvmem; + struct device *dev = priv->dev; + size_t crc32_data_offset; + size_t crc32_data_len; + size_t crc32_offset; ++ __le32 *crc32_addr; + size_t data_offset; + size_t data_len; ++ size_t dev_size; + uint32_t crc32; + uint32_t calc; +- size_t bytes; + uint8_t *buf; ++ int bytes; + int err; + +- buf = kcalloc(1, priv->mtd->size, GFP_KERNEL); ++ dev_size = nvmem_dev_size(nvmem); ++ ++ buf = kzalloc(dev_size, GFP_KERNEL); + if (!buf) { + err = -ENOMEM; + goto err_out; + } + +- err = mtd_read(priv->mtd, 0, priv->mtd->size, &bytes, buf); +- if ((err && !mtd_is_bitflip(err)) || bytes != priv->mtd->size) { +- dev_err(dev, "Failed to read from mtd: %d\n", err); ++ bytes = nvmem_device_read(nvmem, 0, dev_size, buf); ++ if (bytes < 0) { ++ err = bytes; ++ goto err_kfree; ++ } else if (bytes != dev_size) { ++ err = -EIO; + goto err_kfree; + } + +@@ -178,9 +176,17 @@ static int u_boot_env_parse(struct u_boot_env 
*priv) + data_offset = offsetof(struct u_boot_env_image_broadcom, data); + break; + } +- crc32 = le32_to_cpu(*(__le32 *)(buf + crc32_offset)); +- crc32_data_len = priv->mtd->size - crc32_data_offset; +- data_len = priv->mtd->size - data_offset; ++ ++ if (dev_size < data_offset) { ++ dev_err(dev, "Device too small for u-boot-env\n"); ++ err = -EIO; ++ goto err_kfree; ++ } ++ ++ crc32_addr = (__le32 *)(buf + crc32_offset); ++ crc32 = le32_to_cpu(*crc32_addr); ++ crc32_data_len = dev_size - crc32_data_offset; ++ data_len = dev_size - data_offset; + + calc = crc32(~0, buf + crc32_data_offset, crc32_data_len) ^ ~0L; + if (calc != crc32) { +@@ -189,10 +195,8 @@ static int u_boot_env_parse(struct u_boot_env *priv) + goto err_kfree; + } + +- buf[priv->mtd->size - 1] = '\0'; ++ buf[dev_size - 1] = '\0'; + err = u_boot_env_add_cells(priv, buf, data_offset, data_len); +- if (err) +- dev_err(dev, "Failed to add cells: %d\n", err); + + err_kfree: + kfree(buf); +@@ -209,7 +213,6 @@ static int u_boot_env_probe(struct platform_device *pdev) + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct u_boot_env *priv; +- int err; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) +@@ -224,17 +227,15 @@ static int u_boot_env_probe(struct platform_device *pdev) + return PTR_ERR(priv->mtd); + } + +- err = u_boot_env_parse(priv); +- if (err) +- return err; +- + config.dev = dev; +- config.cells = priv->cells; +- config.ncells = priv->ncells; + config.priv = priv; + config.size = priv->mtd->size; + +- return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &config)); ++ priv->nvmem = devm_nvmem_register(dev, &config); ++ if (IS_ERR(priv->nvmem)) ++ return PTR_ERR(priv->nvmem); ++ ++ return u_boot_env_parse(priv); + } + + static const struct of_device_id u_boot_env_of_match_table[] = { +diff --git a/drivers/pinctrl/intel/pinctrl-meteorlake.c b/drivers/pinctrl/intel/pinctrl-meteorlake.c +index 7ced2b402dce04..812696dfe30263 100644 +--- a/drivers/pinctrl/intel/pinctrl-meteorlake.c ++++ b/drivers/pinctrl/intel/pinctrl-meteorlake.c +@@ -583,6 +583,7 @@ static const struct intel_pinctrl_soc_data mtls_soc_data = { + }; + + static const struct acpi_device_id mtl_pinctrl_acpi_match[] = { ++ { "INTC105E", (kernel_ulong_t)&mtlp_soc_data }, + { "INTC1083", (kernel_ulong_t)&mtlp_soc_data }, + { "INTC1082", (kernel_ulong_t)&mtls_soc_data }, + { } +diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c +index 0fe5be53965252..8c5b5f35d8485b 100644 +--- a/drivers/platform/surface/surface_aggregator_registry.c ++++ b/drivers/platform/surface/surface_aggregator_registry.c +@@ -298,7 +298,7 @@ static const struct software_node *ssam_node_group_sp8[] = { + NULL, + }; + +-/* Devices for Surface Pro 9 */ ++/* Devices for Surface Pro 9 and 10 */ + static const struct software_node *ssam_node_group_sp9[] = { + &ssam_node_root, + &ssam_node_hub_kip, +@@ -337,6 +337,9 @@ static const struct acpi_device_id ssam_platform_hub_match[] = { + /* Surface Pro 9 */ + { "MSHW0343", (unsigned long)ssam_node_group_sp9 }, + ++ /* Surface Pro 10 */ ++ { "MSHW0510", (unsigned long)ssam_node_group_sp9 }, ++ + /* Surface Book 2 */ + { "MSHW0107", (unsigned long)ssam_node_group_gen5 }, + +@@ -367,6 +370,9 @@ static const struct acpi_device_id ssam_platform_hub_match[] = { + /* Surface Laptop Go 2 */ + { "MSHW0290", (unsigned long)ssam_node_group_slg1 }, + ++ /* Surface Laptop Go 3 */ ++ { "MSHW0440", (unsigned long)ssam_node_group_slg1 }, ++ + /* 
Surface Laptop Studio */ + { "MSHW0123", (unsigned long)ssam_node_group_sls }, + +diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c +index cf845ee1c7b1f0..ebd81846e2d564 100644 +--- a/drivers/platform/x86/panasonic-laptop.c ++++ b/drivers/platform/x86/panasonic-laptop.c +@@ -337,7 +337,8 @@ static int acpi_pcc_retrieve_biosdata(struct pcc_acpi *pcc) + } + + if (pcc->num_sifr < hkey->package.count) { +- pr_err("SQTY reports bad SINF length\n"); ++ pr_err("SQTY reports bad SINF length SQTY: %lu SINF-pkg-count: %u\n", ++ pcc->num_sifr, hkey->package.count); + status = AE_ERROR; + goto end; + } +@@ -773,6 +774,24 @@ static DEVICE_ATTR_RW(dc_brightness); + static DEVICE_ATTR_RW(current_brightness); + static DEVICE_ATTR_RW(cdpower); + ++static umode_t pcc_sysfs_is_visible(struct kobject *kobj, struct attribute *attr, int idx) ++{ ++ struct device *dev = kobj_to_dev(kobj); ++ struct acpi_device *acpi = to_acpi_device(dev); ++ struct pcc_acpi *pcc = acpi_driver_data(acpi); ++ ++ if (attr == &dev_attr_mute.attr) ++ return (pcc->num_sifr > SINF_MUTE) ? attr->mode : 0; ++ ++ if (attr == &dev_attr_eco_mode.attr) ++ return (pcc->num_sifr > SINF_ECO_MODE) ? attr->mode : 0; ++ ++ if (attr == &dev_attr_current_brightness.attr) ++ return (pcc->num_sifr > SINF_CUR_BRIGHT) ? attr->mode : 0; ++ ++ return attr->mode; ++} ++ + static struct attribute *pcc_sysfs_entries[] = { + &dev_attr_numbatt.attr, + &dev_attr_lcdtype.attr, +@@ -787,8 +806,9 @@ static struct attribute *pcc_sysfs_entries[] = { + }; + + static const struct attribute_group pcc_attr_group = { +- .name = NULL, /* put in device directory */ +- .attrs = pcc_sysfs_entries, ++ .name = NULL, /* put in device directory */ ++ .attrs = pcc_sysfs_entries, ++ .is_visible = pcc_sysfs_is_visible, + }; + + +@@ -941,12 +961,15 @@ static int acpi_pcc_hotkey_resume(struct device *dev) + if (!pcc) + return -EINVAL; + +- acpi_pcc_write_sset(pcc, SINF_MUTE, pcc->mute); +- acpi_pcc_write_sset(pcc, SINF_ECO_MODE, pcc->eco_mode); ++ if (pcc->num_sifr > SINF_MUTE) ++ acpi_pcc_write_sset(pcc, SINF_MUTE, pcc->mute); ++ if (pcc->num_sifr > SINF_ECO_MODE) ++ acpi_pcc_write_sset(pcc, SINF_ECO_MODE, pcc->eco_mode); + acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, pcc->sticky_key); + acpi_pcc_write_sset(pcc, SINF_AC_CUR_BRIGHT, pcc->ac_brightness); + acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, pcc->dc_brightness); +- acpi_pcc_write_sset(pcc, SINF_CUR_BRIGHT, pcc->current_brightness); ++ if (pcc->num_sifr > SINF_CUR_BRIGHT) ++ acpi_pcc_write_sset(pcc, SINF_CUR_BRIGHT, pcc->current_brightness); + + return 0; + } +@@ -963,11 +986,21 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device) + + num_sifr = acpi_pcc_get_sqty(device); + +- if (num_sifr < 0 || num_sifr > 255) { +- pr_err("num_sifr out of range"); ++ /* ++ * pcc->sinf is expected to at least have the AC+DC brightness entries. ++ * Accesses to higher SINF entries are checked against num_sifr. ++ */ ++ if (num_sifr <= SINF_DC_CUR_BRIGHT || num_sifr > 255) { ++ pr_err("num_sifr %d out of range %d - 255\n", num_sifr, SINF_DC_CUR_BRIGHT + 1); + return -ENODEV; + } + ++ /* ++ * Some DSDT-s have an off-by-one bug where the SINF package count is ++ * one higher than the SQTY reported value, allocate 1 entry extra. 
++ */ ++ num_sifr++; ++ + pcc = kzalloc(sizeof(struct pcc_acpi), GFP_KERNEL); + if (!pcc) { + pr_err("Couldn't allocate mem for pcc"); +@@ -1020,11 +1053,14 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device) + acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, 0); + pcc->sticky_key = 0; + +- pcc->eco_mode = pcc->sinf[SINF_ECO_MODE]; +- pcc->mute = pcc->sinf[SINF_MUTE]; + pcc->ac_brightness = pcc->sinf[SINF_AC_CUR_BRIGHT]; + pcc->dc_brightness = pcc->sinf[SINF_DC_CUR_BRIGHT]; +- pcc->current_brightness = pcc->sinf[SINF_CUR_BRIGHT]; ++ if (pcc->num_sifr > SINF_MUTE) ++ pcc->mute = pcc->sinf[SINF_MUTE]; ++ if (pcc->num_sifr > SINF_ECO_MODE) ++ pcc->eco_mode = pcc->sinf[SINF_ECO_MODE]; ++ if (pcc->num_sifr > SINF_CUR_BRIGHT) ++ pcc->current_brightness = pcc->sinf[SINF_CUR_BRIGHT]; + + /* add sysfs attributes */ + result = sysfs_create_group(&device->dev.kobj, &pcc_attr_group); +diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c +index cf69d5c415fbfb..68d54887992d91 100644 +--- a/drivers/soundwire/stream.c ++++ b/drivers/soundwire/stream.c +@@ -1286,18 +1286,18 @@ struct sdw_dpn_prop *sdw_get_slave_dpn_prop(struct sdw_slave *slave, + unsigned int port_num) + { + struct sdw_dpn_prop *dpn_prop; +- unsigned long mask; ++ u8 num_ports; + int i; + + if (direction == SDW_DATA_DIR_TX) { +- mask = slave->prop.source_ports; ++ num_ports = hweight32(slave->prop.source_ports); + dpn_prop = slave->prop.src_dpn_prop; + } else { +- mask = slave->prop.sink_ports; ++ num_ports = hweight32(slave->prop.sink_ports); + dpn_prop = slave->prop.sink_dpn_prop; + } + +- for_each_set_bit(i, &mask, 32) { ++ for (i = 0; i < num_ports; i++) { + if (dpn_prop[i].num == port_num) + return &dpn_prop[i]; + } +diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c +index f4f376a8351b4a..7401ed3b9acd40 100644 +--- a/drivers/spi/spi-geni-qcom.c ++++ b/drivers/spi/spi-geni-qcom.c +@@ -1110,25 +1110,27 @@ static int spi_geni_probe(struct platform_device *pdev) + spin_lock_init(&mas->lock); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, 250); +- pm_runtime_enable(dev); ++ ret = devm_pm_runtime_enable(dev); ++ if (ret) ++ return ret; + + if (device_property_read_bool(&pdev->dev, "spi-slave")) + spi->slave = true; + + ret = geni_icc_get(&mas->se, NULL); + if (ret) +- goto spi_geni_probe_runtime_disable; ++ return ret; + /* Set the bus quota to a reasonable value for register access */ + mas->se.icc_paths[GENI_TO_CORE].avg_bw = Bps_to_icc(CORE_2X_50_MHZ); + mas->se.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW; + + ret = geni_icc_set_bw(&mas->se); + if (ret) +- goto spi_geni_probe_runtime_disable; ++ return ret; + + ret = spi_geni_init(mas); + if (ret) +- goto spi_geni_probe_runtime_disable; ++ return ret; + + /* + * check the mode supported and set_cs for fifo mode only +@@ -1157,8 +1159,6 @@ static int spi_geni_probe(struct platform_device *pdev) + free_irq(mas->irq, spi); + spi_geni_release_dma: + spi_geni_release_dma_chan(mas); +-spi_geni_probe_runtime_disable: +- pm_runtime_disable(dev); + return ret; + } + +@@ -1170,10 +1170,9 @@ static void spi_geni_remove(struct platform_device *pdev) + /* Unregister _before_ disabling pm_runtime() so we stop transfers */ + spi_unregister_master(spi); + +- spi_geni_release_dma_chan(mas); +- + free_irq(mas->irq, spi); +- pm_runtime_disable(&pdev->dev); ++ ++ spi_geni_release_dma_chan(mas); + } + + static int __maybe_unused spi_geni_runtime_suspend(struct device *dev) +diff --git a/drivers/spi/spi-nxp-fspi.c 
b/drivers/spi/spi-nxp-fspi.c +index 168eff721ed378..93a9667f6bdcf1 100644 +--- a/drivers/spi/spi-nxp-fspi.c ++++ b/drivers/spi/spi-nxp-fspi.c +@@ -805,14 +805,15 @@ static void nxp_fspi_fill_txfifo(struct nxp_fspi *f, + if (i < op->data.nbytes) { + u32 data = 0; + int j; ++ int remaining = op->data.nbytes - i; + /* Wait for TXFIFO empty */ + ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR, + FSPI_INTR_IPTXWE, 0, + POLL_TOUT, true); + WARN_ON(ret); + +- for (j = 0; j < ALIGN(op->data.nbytes - i, 4); j += 4) { +- memcpy(&data, buf + i + j, 4); ++ for (j = 0; j < ALIGN(remaining, 4); j += 4) { ++ memcpy(&data, buf + i + j, min_t(int, 4, remaining - j)); + fspi_writel(f, data, base + FSPI_TFDR + j); + } + fspi_writel(f, FSPI_INTR_IPTXWE, base + FSPI_INTR); +diff --git a/drivers/staging/media/atomisp/pci/sh_css_frac.h b/drivers/staging/media/atomisp/pci/sh_css_frac.h +index 8f08df5c88cc36..569a2f59e5519f 100644 +--- a/drivers/staging/media/atomisp/pci/sh_css_frac.h ++++ b/drivers/staging/media/atomisp/pci/sh_css_frac.h +@@ -30,12 +30,24 @@ + #define uISP_VAL_MAX ((unsigned int)((1 << uISP_REG_BIT) - 1)) + + /* a:fraction bits for 16bit precision, b:fraction bits for ISP precision */ +-#define sDIGIT_FITTING(v, a, b) \ +- min_t(int, max_t(int, (((v) >> sSHIFT) >> max(sFRACTION_BITS_FITTING(a) - (b), 0)), \ +- sISP_VAL_MIN), sISP_VAL_MAX) +-#define uDIGIT_FITTING(v, a, b) \ +- min((unsigned int)max((unsigned)(((v) >> uSHIFT) \ +- >> max((int)(uFRACTION_BITS_FITTING(a) - (b)), 0)), \ +- uISP_VAL_MIN), uISP_VAL_MAX) ++static inline int sDIGIT_FITTING(int v, int a, int b) ++{ ++ int fit_shift = sFRACTION_BITS_FITTING(a) - b; ++ ++ v >>= sSHIFT; ++ v >>= fit_shift > 0 ? fit_shift : 0; ++ ++ return clamp_t(int, v, sISP_VAL_MIN, sISP_VAL_MAX); ++} ++ ++static inline unsigned int uDIGIT_FITTING(unsigned int v, int a, int b) ++{ ++ int fit_shift = uFRACTION_BITS_FITTING(a) - b; ++ ++ v >>= uSHIFT; ++ v >>= fit_shift > 0 ? 
fit_shift : 0; ++ ++ return clamp_t(unsigned int, v, uISP_VAL_MIN, uISP_VAL_MAX); ++} + + #endif /* __SH_CSS_FRAC_H */ +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index a4223821188788..ee04185d8e0f58 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -4148,6 +4148,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, + + btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2); + inode_inc_iversion(&inode->vfs_inode); ++ inode_set_ctime_current(&inode->vfs_inode); + inode_inc_iversion(&dir->vfs_inode); + inode_set_ctime_current(&inode->vfs_inode); + dir->vfs_inode.i_mtime = inode_set_ctime_current(&dir->vfs_inode); +diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c +index cf7365581031b5..a2034511b63144 100644 +--- a/fs/nfs/delegation.c ++++ b/fs/nfs/delegation.c +@@ -627,6 +627,9 @@ static int nfs_server_return_marked_delegations(struct nfs_server *server, + prev = delegation; + continue; + } ++ inode = nfs_delegation_grab_inode(delegation); ++ if (inode == NULL) ++ continue; + + if (prev) { + struct inode *tmp = nfs_delegation_grab_inode(prev); +@@ -637,12 +640,6 @@ static int nfs_server_return_marked_delegations(struct nfs_server *server, + } + } + +- inode = nfs_delegation_grab_inode(delegation); +- if (inode == NULL) { +- rcu_read_unlock(); +- iput(to_put); +- goto restart; +- } + delegation = nfs_start_delegation_return_locked(NFS_I(inode)); + rcu_read_unlock(); + +@@ -1164,7 +1161,6 @@ static int nfs_server_reap_unclaimed_delegations(struct nfs_server *server, + struct inode *inode; + restart: + rcu_read_lock(); +-restart_locked: + list_for_each_entry_rcu(delegation, &server->delegations, super_list) { + if (test_bit(NFS_DELEGATION_INODE_FREEING, + &delegation->flags) || +@@ -1175,7 +1171,7 @@ static int nfs_server_reap_unclaimed_delegations(struct nfs_server *server, + continue; + inode = nfs_delegation_grab_inode(delegation); + if (inode == NULL) +- goto restart_locked; ++ continue; + delegation = nfs_start_delegation_return_locked(NFS_I(inode)); + rcu_read_unlock(); + if (delegation != NULL) { +@@ -1296,7 +1292,6 @@ static int nfs_server_reap_expired_delegations(struct nfs_server *server, + nfs4_stateid stateid; + restart: + rcu_read_lock(); +-restart_locked: + list_for_each_entry_rcu(delegation, &server->delegations, super_list) { + if (test_bit(NFS_DELEGATION_INODE_FREEING, + &delegation->flags) || +@@ -1307,7 +1302,7 @@ static int nfs_server_reap_expired_delegations(struct nfs_server *server, + continue; + inode = nfs_delegation_grab_inode(delegation); + if (inode == NULL) +- goto restart_locked; ++ continue; + spin_lock(&delegation->lock); + cred = get_cred_rcu(delegation->cred); + nfs4_stateid_copy(&stateid, &delegation->stateid); +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index e7ac249df1ad6c..299ea2b86df668 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -9845,13 +9845,16 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) + fallthrough; + default: + task->tk_status = 0; ++ lrp->res.lrs_present = 0; + fallthrough; + case 0: + break; + case -NFS4ERR_DELAY: +- if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN) +- break; +- goto out_restart; ++ if (nfs4_async_handle_error(task, server, NULL, NULL) == ++ -EAGAIN) ++ goto out_restart; ++ lrp->res.lrs_present = 0; ++ break; + } + return; + out_restart: +diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c +index 664d3128e730c0..3d1a9f8634a999 100644 +--- a/fs/nfs/pnfs.c ++++ b/fs/nfs/pnfs.c +@@ -1172,10 +1172,9 @@ void 
pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo, + LIST_HEAD(freeme); + + spin_lock(&inode->i_lock); +- if (!pnfs_layout_is_valid(lo) || +- !nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid)) ++ if (!nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid)) + goto out_unlock; +- if (stateid) { ++ if (stateid && pnfs_layout_is_valid(lo)) { + u32 seq = be32_to_cpu(arg_stateid->seqid); + + pnfs_mark_matching_lsegs_invalid(lo, &freeme, range, seq); +diff --git a/fs/smb/client/cifsencrypt.c b/fs/smb/client/cifsencrypt.c +index 6322f0f68a176b..b0473c2567fe68 100644 +--- a/fs/smb/client/cifsencrypt.c ++++ b/fs/smb/client/cifsencrypt.c +@@ -129,7 +129,7 @@ static ssize_t cifs_shash_xarray(const struct iov_iter *iter, ssize_t maxsize, + for (j = foffset / PAGE_SIZE; j < npages; j++) { + len = min_t(size_t, maxsize, PAGE_SIZE - offset); + p = kmap_local_page(folio_page(folio, j)); +- ret = crypto_shash_update(shash, p, len); ++ ret = crypto_shash_update(shash, p + offset, len); + kunmap_local(p); + if (ret < 0) + return ret; +diff --git a/fs/smb/server/mgmt/share_config.c b/fs/smb/server/mgmt/share_config.c +index e0a6b758094fc5..d8d03070ae44b4 100644 +--- a/fs/smb/server/mgmt/share_config.c ++++ b/fs/smb/server/mgmt/share_config.c +@@ -15,6 +15,7 @@ + #include "share_config.h" + #include "user_config.h" + #include "user_session.h" ++#include "../connection.h" + #include "../transport_ipc.h" + #include "../misc.h" + +@@ -120,12 +121,13 @@ static int parse_veto_list(struct ksmbd_share_config *share, + return 0; + } + +-static struct ksmbd_share_config *share_config_request(struct unicode_map *um, ++static struct ksmbd_share_config *share_config_request(struct ksmbd_work *work, + const char *name) + { + struct ksmbd_share_config_response *resp; + struct ksmbd_share_config *share = NULL; + struct ksmbd_share_config *lookup; ++ struct unicode_map *um = work->conn->um; + int ret; + + resp = ksmbd_ipc_share_config_request(name); +@@ -181,7 +183,14 @@ static struct ksmbd_share_config *share_config_request(struct unicode_map *um, + KSMBD_SHARE_CONFIG_VETO_LIST(resp), + resp->veto_list_sz); + if (!ret && share->path) { ++ if (__ksmbd_override_fsids(work, share)) { ++ kill_share(share); ++ share = NULL; ++ goto out; ++ } ++ + ret = kern_path(share->path, 0, &share->vfs_path); ++ ksmbd_revert_fsids(work); + if (ret) { + ksmbd_debug(SMB, "failed to access '%s'\n", + share->path); +@@ -214,7 +223,7 @@ static struct ksmbd_share_config *share_config_request(struct unicode_map *um, + return share; + } + +-struct ksmbd_share_config *ksmbd_share_config_get(struct unicode_map *um, ++struct ksmbd_share_config *ksmbd_share_config_get(struct ksmbd_work *work, + const char *name) + { + struct ksmbd_share_config *share; +@@ -227,7 +236,7 @@ struct ksmbd_share_config *ksmbd_share_config_get(struct unicode_map *um, + + if (share) + return share; +- return share_config_request(um, name); ++ return share_config_request(work, name); + } + + bool ksmbd_share_veto_filename(struct ksmbd_share_config *share, +diff --git a/fs/smb/server/mgmt/share_config.h b/fs/smb/server/mgmt/share_config.h +index 5f591751b92365..d4ac2dd4de2040 100644 +--- a/fs/smb/server/mgmt/share_config.h ++++ b/fs/smb/server/mgmt/share_config.h +@@ -11,6 +11,8 @@ + #include + #include + ++struct ksmbd_work; ++ + struct ksmbd_share_config { + char *name; + char *path; +@@ -68,7 +70,7 @@ static inline void ksmbd_share_config_put(struct ksmbd_share_config *share) + __ksmbd_share_config_put(share); + } + +-struct ksmbd_share_config 
*ksmbd_share_config_get(struct unicode_map *um, ++struct ksmbd_share_config *ksmbd_share_config_get(struct ksmbd_work *work, + const char *name); + bool ksmbd_share_veto_filename(struct ksmbd_share_config *share, + const char *filename); +diff --git a/fs/smb/server/mgmt/tree_connect.c b/fs/smb/server/mgmt/tree_connect.c +index d2c81a8a11dda1..94a52a75014a43 100644 +--- a/fs/smb/server/mgmt/tree_connect.c ++++ b/fs/smb/server/mgmt/tree_connect.c +@@ -16,17 +16,18 @@ + #include "user_session.h" + + struct ksmbd_tree_conn_status +-ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess, +- const char *share_name) ++ksmbd_tree_conn_connect(struct ksmbd_work *work, const char *share_name) + { + struct ksmbd_tree_conn_status status = {-ENOENT, NULL}; + struct ksmbd_tree_connect_response *resp = NULL; + struct ksmbd_share_config *sc; + struct ksmbd_tree_connect *tree_conn = NULL; + struct sockaddr *peer_addr; ++ struct ksmbd_conn *conn = work->conn; ++ struct ksmbd_session *sess = work->sess; + int ret; + +- sc = ksmbd_share_config_get(conn->um, share_name); ++ sc = ksmbd_share_config_get(work, share_name); + if (!sc) + return status; + +@@ -61,7 +62,7 @@ ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess, + struct ksmbd_share_config *new_sc; + + ksmbd_share_config_del(sc); +- new_sc = ksmbd_share_config_get(conn->um, share_name); ++ new_sc = ksmbd_share_config_get(work, share_name); + if (!new_sc) { + pr_err("Failed to update stale share config\n"); + status.ret = -ESTALE; +diff --git a/fs/smb/server/mgmt/tree_connect.h b/fs/smb/server/mgmt/tree_connect.h +index 6377a70b811c89..a42cdd05104114 100644 +--- a/fs/smb/server/mgmt/tree_connect.h ++++ b/fs/smb/server/mgmt/tree_connect.h +@@ -13,6 +13,7 @@ + struct ksmbd_share_config; + struct ksmbd_user; + struct ksmbd_conn; ++struct ksmbd_work; + + enum { + TREE_NEW = 0, +@@ -50,8 +51,7 @@ static inline int test_tree_conn_flag(struct ksmbd_tree_connect *tree_conn, + struct ksmbd_session; + + struct ksmbd_tree_conn_status +-ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess, +- const char *share_name); ++ksmbd_tree_conn_connect(struct ksmbd_work *work, const char *share_name); + void ksmbd_tree_connect_put(struct ksmbd_tree_connect *tcon); + + int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess, +diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c +index 458cc736286aae..c6473b08b1f358 100644 +--- a/fs/smb/server/smb2pdu.c ++++ b/fs/smb/server/smb2pdu.c +@@ -1959,7 +1959,7 @@ int smb2_tree_connect(struct ksmbd_work *work) + ksmbd_debug(SMB, "tree connect request for tree %s treename %s\n", + name, treename); + +- status = ksmbd_tree_conn_connect(conn, sess, name); ++ status = ksmbd_tree_conn_connect(work, name); + if (status.ret == KSMBD_TREE_CONN_STATUS_OK) + rsp->hdr.Id.SyncId.TreeId = cpu_to_le32(status.tree_conn->id); + else +@@ -3714,7 +3714,7 @@ int smb2_open(struct ksmbd_work *work) + kfree(name); + kfree(lc); + +- return 0; ++ return rc; + } + + static int readdir_info_level_struct_sz(int info_level) +@@ -5601,6 +5601,11 @@ int smb2_query_info(struct ksmbd_work *work) + + ksmbd_debug(SMB, "GOT query info request\n"); + ++ if (ksmbd_override_fsids(work)) { ++ rc = -ENOMEM; ++ goto err_out; ++ } ++ + switch (req->InfoType) { + case SMB2_O_INFO_FILE: + ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILE\n"); +@@ -5619,6 +5624,7 @@ int smb2_query_info(struct ksmbd_work *work) + req->InfoType); + rc = -EOPNOTSUPP; + } ++ ksmbd_revert_fsids(work); + + if (!rc) { + 
rsp->StructureSize = cpu_to_le16(9); +@@ -5628,6 +5634,7 @@ int smb2_query_info(struct ksmbd_work *work) + le32_to_cpu(rsp->OutputBufferLength)); + } + ++err_out: + if (rc < 0) { + if (rc == -EACCES) + rsp->hdr.Status = STATUS_ACCESS_DENIED; +diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c +index 474dadf6b7b8bc..13818ecb6e1b2f 100644 +--- a/fs/smb/server/smb_common.c ++++ b/fs/smb/server/smb_common.c +@@ -732,10 +732,10 @@ bool is_asterisk(char *p) + return p && p[0] == '*'; + } + +-int ksmbd_override_fsids(struct ksmbd_work *work) ++int __ksmbd_override_fsids(struct ksmbd_work *work, ++ struct ksmbd_share_config *share) + { + struct ksmbd_session *sess = work->sess; +- struct ksmbd_share_config *share = work->tcon->share_conf; + struct cred *cred; + struct group_info *gi; + unsigned int uid; +@@ -775,6 +775,11 @@ int ksmbd_override_fsids(struct ksmbd_work *work) + return 0; + } + ++int ksmbd_override_fsids(struct ksmbd_work *work) ++{ ++ return __ksmbd_override_fsids(work, work->tcon->share_conf); ++} ++ + void ksmbd_revert_fsids(struct ksmbd_work *work) + { + const struct cred *cred; +diff --git a/fs/smb/server/smb_common.h b/fs/smb/server/smb_common.h +index f1092519c0c288..4a3148b0167f54 100644 +--- a/fs/smb/server/smb_common.h ++++ b/fs/smb/server/smb_common.h +@@ -447,6 +447,8 @@ int ksmbd_extract_shortname(struct ksmbd_conn *conn, + int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command); + + int ksmbd_smb_check_shared_mode(struct file *filp, struct ksmbd_file *curr_fp); ++int __ksmbd_override_fsids(struct ksmbd_work *work, ++ struct ksmbd_share_config *share); + int ksmbd_override_fsids(struct ksmbd_work *work); + void ksmbd_revert_fsids(struct ksmbd_work *work); + +diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h +index 3d1cd726df3471..9106771bb92f01 100644 +--- a/include/linux/mlx5/mlx5_ifc.h ++++ b/include/linux/mlx5/mlx5_ifc.h +@@ -1010,7 +1010,8 @@ struct mlx5_ifc_qos_cap_bits { + + u8 max_tsar_bw_share[0x20]; + +- u8 reserved_at_100[0x20]; ++ u8 nic_element_type[0x10]; ++ u8 nic_tsar_type[0x10]; + + u8 reserved_at_120[0x3]; + u8 log_meter_aso_granularity[0x5]; +@@ -3843,10 +3844,11 @@ enum { + }; + + enum { +- ELEMENT_TYPE_CAP_MASK_TASR = 1 << 0, ++ ELEMENT_TYPE_CAP_MASK_TSAR = 1 << 0, + ELEMENT_TYPE_CAP_MASK_VPORT = 1 << 1, + ELEMENT_TYPE_CAP_MASK_VPORT_TC = 1 << 2, + ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC = 1 << 3, ++ ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP = 1 << 4, + }; + + struct mlx5_ifc_scheduling_context_bits { +@@ -4546,6 +4548,12 @@ enum { + TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2, + }; + ++enum { ++ TSAR_TYPE_CAP_MASK_DWRR = 1 << 0, ++ TSAR_TYPE_CAP_MASK_ROUND_ROBIN = 1 << 1, ++ TSAR_TYPE_CAP_MASK_ETS = 1 << 2, ++}; ++ + struct mlx5_ifc_tsar_element_bits { + u8 reserved_at_0[0x8]; + u8 tsar_type[0x8]; +diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h +index 98b2e1e149f93c..5cc34216f23c32 100644 +--- a/include/linux/mlx5/port.h ++++ b/include/linux/mlx5/port.h +@@ -115,7 +115,7 @@ enum mlx5e_ext_link_mode { + MLX5E_100GAUI_1_100GBASE_CR_KR = 11, + MLX5E_200GAUI_4_200GBASE_CR4_KR4 = 12, + MLX5E_200GAUI_2_200GBASE_CR2_KR2 = 13, +- MLX5E_400GAUI_8 = 15, ++ MLX5E_400GAUI_8_400GBASE_CR8 = 15, + MLX5E_400GAUI_4_400GBASE_CR4_KR4 = 16, + MLX5E_EXT_LINK_MODES_NUMBER, + }; +diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h +index 4523e4e8331970..526025561df199 100644 +--- a/include/linux/nvmem-consumer.h ++++ b/include/linux/nvmem-consumer.h +@@ -81,6 +81,7 @@ int 
nvmem_device_cell_write(struct nvmem_device *nvmem, + struct nvmem_cell_info *info, void *buf); + + const char *nvmem_dev_name(struct nvmem_device *nvmem); ++size_t nvmem_dev_size(struct nvmem_device *nvmem); + + void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, + size_t nentries); +diff --git a/include/linux/property.h b/include/linux/property.h +index 1684fca930f726..d32b8052e0863b 100644 +--- a/include/linux/property.h ++++ b/include/linux/property.h +@@ -11,6 +11,7 @@ + #define _LINUX_PROPERTY_H_ + + #include ++#include + #include + #include + #include +@@ -167,6 +168,11 @@ struct fwnode_handle *device_get_next_child_node(const struct device *dev, + for (child = device_get_next_child_node(dev, NULL); child; \ + child = device_get_next_child_node(dev, child)) + ++#define device_for_each_child_node_scoped(dev, child) \ ++ for (struct fwnode_handle *child __free(fwnode_handle) = \ ++ device_get_next_child_node(dev, NULL); \ ++ child; child = device_get_next_child_node(dev, child)) ++ + struct fwnode_handle *fwnode_get_named_child_node(const struct fwnode_handle *fwnode, + const char *childname); + struct fwnode_handle *device_get_named_child_node(const struct device *dev, +@@ -175,6 +181,8 @@ struct fwnode_handle *device_get_named_child_node(const struct device *dev, + struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode); + void fwnode_handle_put(struct fwnode_handle *fwnode); + ++DEFINE_FREE(fwnode_handle, struct fwnode_handle *, fwnode_handle_put(_T)) ++ + int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index); + int fwnode_irq_get_byname(const struct fwnode_handle *fwnode, const char *name); + +diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h +index 6c395a2600e8d1..276ca543ef44d8 100644 +--- a/include/linux/virtio_net.h ++++ b/include/linux/virtio_net.h +@@ -173,7 +173,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, + break; + case SKB_GSO_TCPV4: + case SKB_GSO_TCPV6: +- if (skb->csum_offset != offsetof(struct tcphdr, check)) ++ if (skb->ip_summed == CHECKSUM_PARTIAL && ++ skb->csum_offset != offsetof(struct tcphdr, check)) + return -EINVAL; + break; + } +diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c +index 5b06f67879f5fa..461b4ab60b501a 100644 +--- a/kernel/trace/trace_osnoise.c ++++ b/kernel/trace/trace_osnoise.c +@@ -228,6 +228,11 @@ static inline struct osnoise_variables *this_cpu_osn_var(void) + return this_cpu_ptr(&per_cpu_osnoise_var); + } + ++/* ++ * Protect the interface. ++ */ ++static struct mutex interface_lock; ++ + #ifdef CONFIG_TIMERLAT_TRACER + /* + * Runtime information for the timer mode. +@@ -252,11 +257,6 @@ static inline struct timerlat_variables *this_cpu_tmr_var(void) + return this_cpu_ptr(&per_cpu_timerlat_var); + } + +-/* +- * Protect the interface. +- */ +-static struct mutex interface_lock; +- + /* + * tlat_var_reset - Reset the values of the given timerlat_variables + */ +diff --git a/mm/memory.c b/mm/memory.c +index bfd2273cb4b460..b6ddfe22c5d5c0 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -2424,11 +2424,7 @@ static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd, + return 0; + } + +-/* +- * Variant of remap_pfn_range that does not call track_pfn_remap. The caller +- * must have pre-validated the caching bits of the pgprot_t. 
+- */ +-int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, ++static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn, unsigned long size, pgprot_t prot) + { + pgd_t *pgd; +@@ -2481,6 +2477,27 @@ int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, + return 0; + } + ++/* ++ * Variant of remap_pfn_range that does not call track_pfn_remap. The caller ++ * must have pre-validated the caching bits of the pgprot_t. ++ */ ++int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, ++ unsigned long pfn, unsigned long size, pgprot_t prot) ++{ ++ int error = remap_pfn_range_internal(vma, addr, pfn, size, prot); ++ ++ if (!error) ++ return 0; ++ ++ /* ++ * A partial pfn range mapping is dangerous: it does not ++ * maintain page reference counts, and callers may free ++ * pages due to the error. So zap it early. ++ */ ++ zap_page_range_single(vma, addr, size, NULL); ++ return error; ++} ++ + /** + * remap_pfn_range - remap kernel memory to userspace + * @vma: user vma to map to +diff --git a/net/ipv4/fou_core.c b/net/ipv4/fou_core.c +index e0b8d6b17a34dc..4e0a7d038e219c 100644 +--- a/net/ipv4/fou_core.c ++++ b/net/ipv4/fou_core.c +@@ -336,11 +336,11 @@ static struct sk_buff *gue_gro_receive(struct sock *sk, + struct gro_remcsum grc; + u8 proto; + ++ skb_gro_remcsum_init(&grc); ++ + if (!fou) + goto out; + +- skb_gro_remcsum_init(&grc); +- + off = skb_gro_offset(skb); + len = off + sizeof(*guehdr); + +diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c +index d5902e7f47a782..953c22c0ec47ec 100644 +--- a/net/mptcp/pm_netlink.c ++++ b/net/mptcp/pm_netlink.c +@@ -339,15 +339,21 @@ mptcp_pm_del_add_timer(struct mptcp_sock *msk, + { + struct mptcp_pm_add_entry *entry; + struct sock *sk = (struct sock *)msk; ++ struct timer_list *add_timer = NULL; + + spin_lock_bh(&msk->pm.lock); + entry = mptcp_lookup_anno_list_by_saddr(msk, addr); +- if (entry && (!check_id || entry->addr.id == addr->id)) ++ if (entry && (!check_id || entry->addr.id == addr->id)) { + entry->retrans_times = ADD_ADDR_RETRANS_MAX; ++ add_timer = &entry->add_timer; ++ } ++ if (!check_id && entry) ++ list_del(&entry->list); + spin_unlock_bh(&msk->pm.lock); + +- if (entry && (!check_id || entry->addr.id == addr->id)) +- sk_stop_timer_sync(sk, &entry->add_timer); ++ /* no lock, because sk_stop_timer_sync() is calling del_timer_sync() */ ++ if (add_timer) ++ sk_stop_timer_sync(sk, add_timer); + + return entry; + } +@@ -1493,7 +1499,6 @@ static bool remove_anno_list_by_saddr(struct mptcp_sock *msk, + + entry = mptcp_pm_del_add_timer(msk, addr, false); + if (entry) { +- list_del(&entry->list); + kfree(entry); + return true; + } +diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c +index f30163e2ca6207..765ffd6e06bc41 100644 +--- a/net/netfilter/nft_socket.c ++++ b/net/netfilter/nft_socket.c +@@ -110,13 +110,13 @@ static void nft_socket_eval(const struct nft_expr *expr, + *dest = READ_ONCE(sk->sk_mark); + } else { + regs->verdict.code = NFT_BREAK; +- return; ++ goto out_put_sk; + } + break; + case NFT_SOCKET_WILDCARD: + if (!sk_fullsock(sk)) { + regs->verdict.code = NFT_BREAK; +- return; ++ goto out_put_sk; + } + nft_socket_wildcard(pkt, regs, sk, dest); + break; +@@ -124,7 +124,7 @@ static void nft_socket_eval(const struct nft_expr *expr, + case NFT_SOCKET_CGROUPV2: + if (!nft_sock_get_eval_cgroupv2(dest, sk, pkt, priv->level)) { + regs->verdict.code = NFT_BREAK; +- return; ++ goto out_put_sk; + } + break; + 
#endif +@@ -133,6 +133,7 @@ static void nft_socket_eval(const struct nft_expr *expr, + regs->verdict.code = NFT_BREAK; + } + ++out_put_sk: + if (sk != skb->sk) + sock_gen_put(sk); + } +diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh +index 902eb429b9dbd9..0b7952471c18f6 100755 +--- a/scripts/kconfig/merge_config.sh ++++ b/scripts/kconfig/merge_config.sh +@@ -167,6 +167,8 @@ for ORIG_MERGE_FILE in $MERGE_LIST ; do + sed -i "/$CFG[ =]/d" $MERGE_FILE + fi + done ++ # In case the previous file lacks a new line at the end ++ echo >> $TMP_FILE + cat $MERGE_FILE >> $TMP_FILE + done + +diff --git a/sound/soc/codecs/peb2466.c b/sound/soc/codecs/peb2466.c +index 5dec69be0acb2e..06c83d2042f3e5 100644 +--- a/sound/soc/codecs/peb2466.c ++++ b/sound/soc/codecs/peb2466.c +@@ -229,7 +229,8 @@ static int peb2466_reg_read(void *context, unsigned int reg, unsigned int *val) + case PEB2466_CMD_XOP: + case PEB2466_CMD_SOP: + ret = peb2466_read_byte(peb2466, reg, &tmp); +- *val = tmp; ++ if (!ret) ++ *val = tmp; + break; + default: + dev_err(&peb2466->spi->dev, "Not a XOP or SOP command\n"); +diff --git a/sound/soc/meson/axg-card.c b/sound/soc/meson/axg-card.c +index b6f5b4572012da..44175b1b14a295 100644 +--- a/sound/soc/meson/axg-card.c ++++ b/sound/soc/meson/axg-card.c +@@ -104,7 +104,7 @@ static int axg_card_add_tdm_loopback(struct snd_soc_card *card, + int *index) + { + struct meson_card *priv = snd_soc_card_get_drvdata(card); +- struct snd_soc_dai_link *pad = &card->dai_link[*index]; ++ struct snd_soc_dai_link *pad; + struct snd_soc_dai_link *lb; + struct snd_soc_dai_link_component *dlc; + int ret; +@@ -114,6 +114,7 @@ static int axg_card_add_tdm_loopback(struct snd_soc_card *card, + if (ret) + return ret; + ++ pad = &card->dai_link[*index]; + lb = &card->dai_link[*index + 1]; + + lb->name = devm_kasprintf(card->dev, GFP_KERNEL, "%s-lb", pad->name); +diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c +index 8df8cbb447f10f..84d59419e4eb5b 100644 +--- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c ++++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c +@@ -1841,7 +1841,7 @@ static void unix_inet_redir_to_connected(int family, int type, int sock_mapfd, + if (err) + return; + +- if (socketpair(AF_UNIX, SOCK_DGRAM | SOCK_NONBLOCK, 0, sfd)) ++ if (socketpair(AF_UNIX, type | SOCK_NONBLOCK, 0, sfd)) + goto close_cli0; + c1 = sfd[0], p1 = sfd[1]; + +@@ -1876,7 +1876,6 @@ static void unix_inet_redir_to_connected(int family, int type, int sock_mapfd, + close_cli0: + xclose(c0); + xclose(p0); +- + } + + static void unix_inet_skb_redir_to_connected(struct test_sockmap_listen *skel, +diff --git a/tools/testing/selftests/net/csum.c b/tools/testing/selftests/net/csum.c +index 90eb06fefa59ec..eef72b50270c5d 100644 +--- a/tools/testing/selftests/net/csum.c ++++ b/tools/testing/selftests/net/csum.c +@@ -654,10 +654,16 @@ static int recv_verify_packet_ipv4(void *nh, int len) + { + struct iphdr *iph = nh; + uint16_t proto = cfg_encap ? 
IPPROTO_UDP : cfg_proto; ++ uint16_t ip_len; + + if (len < sizeof(*iph) || iph->protocol != proto) + return -1; + ++ ip_len = ntohs(iph->tot_len); ++ if (ip_len > len || ip_len < sizeof(*iph)) ++ return -1; ++ ++ len = ip_len; + iph_addr_p = &iph->saddr; + if (proto == IPPROTO_TCP) + return recv_verify_packet_tcp(iph + 1, len - sizeof(*iph)); +@@ -669,16 +675,22 @@ static int recv_verify_packet_ipv6(void *nh, int len) + { + struct ipv6hdr *ip6h = nh; + uint16_t proto = cfg_encap ? IPPROTO_UDP : cfg_proto; ++ uint16_t ip_len; + + if (len < sizeof(*ip6h) || ip6h->nexthdr != proto) + return -1; + ++ ip_len = ntohs(ip6h->payload_len); ++ if (ip_len > len - sizeof(*ip6h)) ++ return -1; ++ ++ len = ip_len; + iph_addr_p = &ip6h->saddr; + + if (proto == IPPROTO_TCP) +- return recv_verify_packet_tcp(ip6h + 1, len - sizeof(*ip6h)); ++ return recv_verify_packet_tcp(ip6h + 1, len); + else +- return recv_verify_packet_udp(ip6h + 1, len - sizeof(*ip6h)); ++ return recv_verify_packet_udp(ip6h + 1, len); + } + + /* return whether auxdata includes TP_STATUS_CSUM_VALID */ +diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh +index 6faff03acc110b..3c286fba8d5dc5 100755 +--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh ++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh +@@ -3222,7 +3222,9 @@ fullmesh_tests() + pm_nl_set_limits $ns1 1 3 + pm_nl_set_limits $ns2 1 3 + pm_nl_add_endpoint $ns1 10.0.2.1 flags signal +- pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,fullmesh ++ if mptcp_lib_kallsyms_has "mptcp_pm_subflow_check_next$"; then ++ pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,fullmesh ++ fi + fullmesh=1 speed=slow \ + run_tests $ns1 $ns2 10.0.1.1 + chk_join_nr 3 3 3 diff --git a/patch/kernel/archive/odroidxu4-6.6/patch-6.6.52-53.patch b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.52-53.patch new file mode 100644 index 000000000000..a5afacdacb69 --- /dev/null +++ b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.52-53.patch @@ -0,0 +1,2051 @@ +diff --git a/Makefile b/Makefile +index 5b22e3ff440ca1..0158e14f0dd966 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 6 +-SUBLEVEL = 52 ++SUBLEVEL = 53 + EXTRAVERSION = + NAME = Hurr durr I'ma ninja sloth + +diff --git a/arch/loongarch/include/asm/hw_irq.h b/arch/loongarch/include/asm/hw_irq.h +index af4f4e8fbd858f..8156ffb6741591 100644 +--- a/arch/loongarch/include/asm/hw_irq.h ++++ b/arch/loongarch/include/asm/hw_irq.h +@@ -9,6 +9,8 @@ + + extern atomic_t irq_err_count; + ++#define ARCH_IRQ_INIT_FLAGS IRQ_NOPROBE ++ + /* + * interrupt-retrigger: NOP for now. This may not be appropriate for all + * machines, we'll see ... 
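[Editorial aside, not part of the upstream diff: the loongarch hunk above defines ARCH_IRQ_INIT_FLAGS as IRQ_NOPROBE so that, per my reading of the generic IRQ core (paraphrased from memory, treat the plumbing as an assumption), every interrupt descriptor picks the flag up as a default at initialization; the companion hunk that follows can then drop the explicit loop that called irq_set_noprobe() for each of the NR_IRQS descriptors. A compilable toy sketch of the underlying pattern, defaults at creation instead of fixups afterwards, with all values invented for illustration:]

#include <stdio.h>

#define IRQ_NOPROBE 0x400u	/* flag value made up for this sketch */

/* An arch opts in by defining the macro, as the hunk above does;
 * a generic fallback would leave it 0. */
#define ARCH_IRQ_INIT_FLAGS IRQ_NOPROBE

struct irq_desc {
	unsigned int status;
};

/* Every descriptor receives the arch-mandated defaults when it is set up,
 * so no later pass over all descriptors is needed. */
static void desc_set_defaults(struct irq_desc *desc)
{
	desc->status = ARCH_IRQ_INIT_FLAGS;
}

int main(void)
{
	struct irq_desc descs[4];

	for (unsigned int i = 0; i < 4; i++)
		desc_set_defaults(&descs[i]);

	printf("desc[0] flags: %#x (IRQ_NOPROBE set at init)\n", descs[0].status);
	return 0;
}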
+diff --git a/arch/loongarch/kernel/irq.c b/arch/loongarch/kernel/irq.c +index 883e5066ae445f..df42c063f6c430 100644 +--- a/arch/loongarch/kernel/irq.c ++++ b/arch/loongarch/kernel/irq.c +@@ -122,9 +122,6 @@ void __init init_IRQ(void) + panic("IPI IRQ request failed\n"); + #endif + +- for (i = 0; i < NR_IRQS; i++) +- irq_set_noprobe(i); +- + for_each_possible_cpu(i) { + page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, order); + +diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c +index 3827dc76edd823..4520c57415797f 100644 +--- a/arch/microblaze/mm/init.c ++++ b/arch/microblaze/mm/init.c +@@ -193,11 +193,6 @@ asmlinkage void __init mmu_init(void) + { + unsigned int kstart, ksize; + +- if (!memblock.reserved.cnt) { +- pr_emerg("Error memory count\n"); +- machine_restart(NULL); +- } +- + if ((u32) memblock.memory.regions[0].size < 0x400000) { + pr_emerg("Memory must be greater than 4MB\n"); + machine_restart(NULL); +diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c +index fac4b4116efe10..bcb2d640a0cd85 100644 +--- a/arch/x86/kernel/cpu/mshyperv.c ++++ b/arch/x86/kernel/cpu/mshyperv.c +@@ -423,6 +423,7 @@ static void __init ms_hyperv_init_platform(void) + ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) { + x86_platform.calibrate_tsc = hv_get_tsc_khz; + x86_platform.calibrate_cpu = hv_get_tsc_khz; ++ setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ); + } + + if (ms_hyperv.priv_high & HV_ISOLATION) { +diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c +index 679893ea5e6873..6215dfa23578da 100644 +--- a/arch/x86/mm/init.c ++++ b/arch/x86/mm/init.c +@@ -261,21 +261,17 @@ static void __init probe_page_size_mask(void) + } + } + +-#define INTEL_MATCH(_model) { .vendor = X86_VENDOR_INTEL, \ +- .family = 6, \ +- .model = _model, \ +- } + /* + * INVLPG may not properly flush Global entries + * on these CPUs when PCIDs are enabled. + */ + static const struct x86_cpu_id invlpg_miss_ids[] = { +- INTEL_MATCH(INTEL_FAM6_ALDERLAKE ), +- INTEL_MATCH(INTEL_FAM6_ALDERLAKE_L ), +- INTEL_MATCH(INTEL_FAM6_ATOM_GRACEMONT ), +- INTEL_MATCH(INTEL_FAM6_RAPTORLAKE ), +- INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_P), +- INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_S), ++ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, 0), ++ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 0), ++ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, 0), ++ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, 0), ++ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, 0), ++ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, 0), + {} + }; + +diff --git a/block/blk-core.c b/block/blk-core.c +index bf058cea9016ae..4f25d2c4bc7055 100644 +--- a/block/blk-core.c ++++ b/block/blk-core.c +@@ -49,6 +49,7 @@ + #include "blk-pm.h" + #include "blk-cgroup.h" + #include "blk-throttle.h" ++#include "blk-ioprio.h" + + struct dentry *blk_debugfs_root; + +@@ -819,6 +820,14 @@ void submit_bio_noacct(struct bio *bio) + } + EXPORT_SYMBOL(submit_bio_noacct); + ++static void bio_set_ioprio(struct bio *bio) ++{ ++ /* Nobody set ioprio so far? 
Initialize it based on task's nice value */ ++ if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE) ++ bio->bi_ioprio = get_current_ioprio(); ++ blkcg_set_ioprio(bio); ++} ++ + /** + * submit_bio - submit a bio to the block device layer for I/O + * @bio: The &struct bio which describes the I/O +@@ -841,6 +850,7 @@ void submit_bio(struct bio *bio) + count_vm_events(PGPGOUT, bio_sectors(bio)); + } + ++ bio_set_ioprio(bio); + submit_bio_noacct(bio); + } + EXPORT_SYMBOL(submit_bio); +diff --git a/block/blk-mq.c b/block/blk-mq.c +index 7cc315527a44c3..733d72f4d1cc9d 100644 +--- a/block/blk-mq.c ++++ b/block/blk-mq.c +@@ -40,7 +40,6 @@ + #include "blk-stat.h" + #include "blk-mq-sched.h" + #include "blk-rq-qos.h" +-#include "blk-ioprio.h" + + static DEFINE_PER_CPU(struct llist_head, blk_cpu_done); + static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd); +@@ -2956,14 +2955,6 @@ static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug, + return true; + } + +-static void bio_set_ioprio(struct bio *bio) +-{ +- /* Nobody set ioprio so far? Initialize it based on task's nice value */ +- if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE) +- bio->bi_ioprio = get_current_ioprio(); +- blkcg_set_ioprio(bio); +-} +- + /** + * blk_mq_submit_bio - Create and send a request to block device. + * @bio: Bio pointer. +@@ -2988,7 +2979,6 @@ void blk_mq_submit_bio(struct bio *bio) + blk_status_t ret; + + bio = blk_queue_bounce(bio, q); +- bio_set_ioprio(bio); + + if (plug) { + rq = rq_list_peek(&plug->cached_rq); +diff --git a/drivers/accel/drm_accel.c b/drivers/accel/drm_accel.c +index 4a9baf02439e42..8827cb78ca9d8c 100644 +--- a/drivers/accel/drm_accel.c ++++ b/drivers/accel/drm_accel.c +@@ -8,7 +8,7 @@ + + #include + #include +-#include ++#include + + #include + #include +@@ -17,8 +17,7 @@ + #include + #include + +-static DEFINE_SPINLOCK(accel_minor_lock); +-static struct idr accel_minors_idr; ++DEFINE_XARRAY_ALLOC(accel_minors_xa); + + static struct dentry *accel_debugfs_root; + static struct class *accel_class; +@@ -120,99 +119,6 @@ void accel_set_device_instance_params(struct device *kdev, int index) + kdev->type = &accel_sysfs_device_minor; + } + +-/** +- * accel_minor_alloc() - Allocates a new accel minor +- * +- * This function access the accel minors idr and allocates from it +- * a new id to represent a new accel minor +- * +- * Return: A new id on success or error code in case idr_alloc failed +- */ +-int accel_minor_alloc(void) +-{ +- unsigned long flags; +- int r; +- +- spin_lock_irqsave(&accel_minor_lock, flags); +- r = idr_alloc(&accel_minors_idr, NULL, 0, ACCEL_MAX_MINORS, GFP_NOWAIT); +- spin_unlock_irqrestore(&accel_minor_lock, flags); +- +- return r; +-} +- +-/** +- * accel_minor_remove() - Remove an accel minor +- * @index: The minor id to remove. +- * +- * This function access the accel minors idr and removes from +- * it the member with the id that is passed to this function. +- */ +-void accel_minor_remove(int index) +-{ +- unsigned long flags; +- +- spin_lock_irqsave(&accel_minor_lock, flags); +- idr_remove(&accel_minors_idr, index); +- spin_unlock_irqrestore(&accel_minor_lock, flags); +-} +- +-/** +- * accel_minor_replace() - Replace minor pointer in accel minors idr. +- * @minor: Pointer to the new minor. +- * @index: The minor id to replace. +- * +- * This function access the accel minors idr structure and replaces the pointer +- * that is associated with an existing id. Because the minor pointer can be +- * NULL, we need to explicitly pass the index. 
+- * +- * Return: 0 for success, negative value for error +- */ +-void accel_minor_replace(struct drm_minor *minor, int index) +-{ +- unsigned long flags; +- +- spin_lock_irqsave(&accel_minor_lock, flags); +- idr_replace(&accel_minors_idr, minor, index); +- spin_unlock_irqrestore(&accel_minor_lock, flags); +-} +- +-/* +- * Looks up the given minor-ID and returns the respective DRM-minor object. The +- * refence-count of the underlying device is increased so you must release this +- * object with accel_minor_release(). +- * +- * The object can be only a drm_minor that represents an accel device. +- * +- * As long as you hold this minor, it is guaranteed that the object and the +- * minor->dev pointer will stay valid! However, the device may get unplugged and +- * unregistered while you hold the minor. +- */ +-static struct drm_minor *accel_minor_acquire(unsigned int minor_id) +-{ +- struct drm_minor *minor; +- unsigned long flags; +- +- spin_lock_irqsave(&accel_minor_lock, flags); +- minor = idr_find(&accel_minors_idr, minor_id); +- if (minor) +- drm_dev_get(minor->dev); +- spin_unlock_irqrestore(&accel_minor_lock, flags); +- +- if (!minor) { +- return ERR_PTR(-ENODEV); +- } else if (drm_dev_is_unplugged(minor->dev)) { +- drm_dev_put(minor->dev); +- return ERR_PTR(-ENODEV); +- } +- +- return minor; +-} +- +-static void accel_minor_release(struct drm_minor *minor) +-{ +- drm_dev_put(minor->dev); +-} +- + /** + * accel_open - open method for ACCEL file + * @inode: device inode +@@ -230,7 +136,7 @@ int accel_open(struct inode *inode, struct file *filp) + struct drm_minor *minor; + int retcode; + +- minor = accel_minor_acquire(iminor(inode)); ++ minor = drm_minor_acquire(&accel_minors_xa, iminor(inode)); + if (IS_ERR(minor)) + return PTR_ERR(minor); + +@@ -249,7 +155,7 @@ int accel_open(struct inode *inode, struct file *filp) + + err_undo: + atomic_dec(&dev->open_count); +- accel_minor_release(minor); ++ drm_minor_release(minor); + return retcode; + } + EXPORT_SYMBOL_GPL(accel_open); +@@ -260,7 +166,7 @@ static int accel_stub_open(struct inode *inode, struct file *filp) + struct drm_minor *minor; + int err; + +- minor = accel_minor_acquire(iminor(inode)); ++ minor = drm_minor_acquire(&accel_minors_xa, iminor(inode)); + if (IS_ERR(minor)) + return PTR_ERR(minor); + +@@ -277,7 +183,7 @@ static int accel_stub_open(struct inode *inode, struct file *filp) + err = 0; + + out: +- accel_minor_release(minor); ++ drm_minor_release(minor); + + return err; + } +@@ -293,15 +199,13 @@ void accel_core_exit(void) + unregister_chrdev(ACCEL_MAJOR, "accel"); + debugfs_remove(accel_debugfs_root); + accel_sysfs_destroy(); +- idr_destroy(&accel_minors_idr); ++ WARN_ON(!xa_empty(&accel_minors_xa)); + } + + int __init accel_core_init(void) + { + int ret; + +- idr_init(&accel_minors_idr); +- + ret = accel_sysfs_init(); + if (ret < 0) { + DRM_ERROR("Cannot create ACCEL class: %d\n", ret); +diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c +index d526a4c91e82e5..545998e9f6ad21 100644 +--- a/drivers/gpio/gpiolib-cdev.c ++++ b/drivers/gpio/gpiolib-cdev.c +@@ -1565,12 +1565,14 @@ static long linereq_set_config_unlocked(struct linereq *lr, + line = &lr->lines[i]; + desc = lr->lines[i].desc; + flags = gpio_v2_line_config_flags(lc, i); +- gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags); +- edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS; + /* +- * Lines have to be requested explicitly for input +- * or output, else the line will be treated "as is". 
++ * Lines not explicitly reconfigured as input or output ++ * are left unchanged. + */ ++ if (!(flags & GPIO_V2_LINE_DIRECTION_FLAGS)) ++ continue; ++ gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags); ++ edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS; + if (flags & GPIO_V2_LINE_FLAG_OUTPUT) { + int val = gpio_v2_line_config_output_value(lc, i); + +@@ -1578,7 +1580,7 @@ static long linereq_set_config_unlocked(struct linereq *lr, + ret = gpiod_direction_output(desc, val); + if (ret) + return ret; +- } else if (flags & GPIO_V2_LINE_FLAG_INPUT) { ++ } else { + ret = gpiod_direction_input(desc); + if (ret) + return ret; +diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c +index 9299026701f348..1a5fa7df284dec 100644 +--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c ++++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c +@@ -160,6 +160,7 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc, + struct drm_plane *plane; + struct list_head zorder_list; + int order = 0, err; ++ u32 slave_zpos = 0; + + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n", + crtc->base.id, crtc->name); +@@ -199,10 +200,13 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc, + plane_st->zpos, plane_st->normalized_zpos); + + /* calculate max slave zorder */ +- if (has_bit(drm_plane_index(plane), kcrtc->slave_planes)) ++ if (has_bit(drm_plane_index(plane), kcrtc->slave_planes)) { ++ slave_zpos = plane_st->normalized_zpos; ++ if (to_kplane_st(plane_st)->layer_split) ++ slave_zpos++; + kcrtc_st->max_slave_zorder = +- max(plane_st->normalized_zpos, +- kcrtc_st->max_slave_zorder); ++ max(slave_zpos, kcrtc_st->max_slave_zorder); ++ } + } + + crtc_st->zpos_changed = true; +diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c +index 71bb8806dc5f5b..d453d710ef0c10 100644 +--- a/drivers/gpu/drm/drm_drv.c ++++ b/drivers/gpu/drm/drm_drv.c +@@ -34,6 +34,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -54,8 +55,7 @@ MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl"); + MODULE_DESCRIPTION("DRM shared core routines"); + MODULE_LICENSE("GPL and additional rights"); + +-static DEFINE_SPINLOCK(drm_minor_lock); +-static struct idr drm_minors_idr; ++DEFINE_XARRAY_ALLOC(drm_minors_xa); + + /* + * If the drm core fails to init for whatever reason, +@@ -83,6 +83,18 @@ DEFINE_STATIC_SRCU(drm_unplug_srcu); + * registered and unregistered dynamically according to device-state. 
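/*
 * A sketch of the allocation scheme the xarray conversion below uses:
 * each minor type first tries its legacy 64-wide window (0-63 primary,
 * 64-127 control, 128-191 render), and primary/render nodes fall back
 * to a shared dynamic range once their window is full. Limits follow
 * the patch; the accel case and error handling are trimmed.
 */
static int sketch_minor_alloc(enum drm_minor_type type)
{
	u32 id;
	int r;

	r = xa_alloc(&drm_minors_xa, &id, NULL,
		     XA_LIMIT(64 * type, 64 * type + 63), GFP_KERNEL);
	if (r == -EBUSY)	/* legacy window exhausted */
		r = xa_alloc(&drm_minors_xa, &id, NULL,
			     XA_LIMIT(192, (1 << MINORBITS) - 1), GFP_KERNEL);
	return r < 0 ? r : id;
}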
+ */ + ++static struct xarray *drm_minor_get_xa(enum drm_minor_type type) ++{ ++ if (type == DRM_MINOR_PRIMARY || type == DRM_MINOR_RENDER) ++ return &drm_minors_xa; ++#if IS_ENABLED(CONFIG_DRM_ACCEL) ++ else if (type == DRM_MINOR_ACCEL) ++ return &accel_minors_xa; ++#endif ++ else ++ return ERR_PTR(-EOPNOTSUPP); ++} ++ + static struct drm_minor **drm_minor_get_slot(struct drm_device *dev, + enum drm_minor_type type) + { +@@ -101,25 +113,31 @@ static struct drm_minor **drm_minor_get_slot(struct drm_device *dev, + static void drm_minor_alloc_release(struct drm_device *dev, void *data) + { + struct drm_minor *minor = data; +- unsigned long flags; + + WARN_ON(dev != minor->dev); + + put_device(minor->kdev); + +- if (minor->type == DRM_MINOR_ACCEL) { +- accel_minor_remove(minor->index); +- } else { +- spin_lock_irqsave(&drm_minor_lock, flags); +- idr_remove(&drm_minors_idr, minor->index); +- spin_unlock_irqrestore(&drm_minor_lock, flags); +- } ++ xa_erase(drm_minor_get_xa(minor->type), minor->index); + } + ++/* ++ * DRM used to support 64 devices, for backwards compatibility we need to maintain the ++ * minor allocation scheme where minors 0-63 are primary nodes, 64-127 are control nodes, ++ * and 128-191 are render nodes. ++ * After reaching the limit, we're allocating minors dynamically - first-come, first-serve. ++ * Accel nodes are using a distinct major, so the minors are allocated in continuous 0-MAX ++ * range. ++ */ ++#define DRM_MINOR_LIMIT(t) ({ \ ++ typeof(t) _t = (t); \ ++ _t == DRM_MINOR_ACCEL ? XA_LIMIT(0, ACCEL_MAX_MINORS) : XA_LIMIT(64 * _t, 64 * _t + 63); \ ++}) ++#define DRM_EXTENDED_MINOR_LIMIT XA_LIMIT(192, (1 << MINORBITS) - 1) ++ + static int drm_minor_alloc(struct drm_device *dev, enum drm_minor_type type) + { + struct drm_minor *minor; +- unsigned long flags; + int r; + + minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL); +@@ -129,25 +147,14 @@ static int drm_minor_alloc(struct drm_device *dev, enum drm_minor_type type) + minor->type = type; + minor->dev = dev; + +- idr_preload(GFP_KERNEL); +- if (type == DRM_MINOR_ACCEL) { +- r = accel_minor_alloc(); +- } else { +- spin_lock_irqsave(&drm_minor_lock, flags); +- r = idr_alloc(&drm_minors_idr, +- NULL, +- 64 * type, +- 64 * (type + 1), +- GFP_NOWAIT); +- spin_unlock_irqrestore(&drm_minor_lock, flags); +- } +- idr_preload_end(); +- ++ r = xa_alloc(drm_minor_get_xa(type), &minor->index, ++ NULL, DRM_MINOR_LIMIT(type), GFP_KERNEL); ++ if (r == -EBUSY && (type == DRM_MINOR_PRIMARY || type == DRM_MINOR_RENDER)) ++ r = xa_alloc(&drm_minors_xa, &minor->index, ++ NULL, DRM_EXTENDED_MINOR_LIMIT, GFP_KERNEL); + if (r < 0) + return r; + +- minor->index = r; +- + r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor); + if (r) + return r; +@@ -163,7 +170,7 @@ static int drm_minor_alloc(struct drm_device *dev, enum drm_minor_type type) + static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type) + { + struct drm_minor *minor; +- unsigned long flags; ++ void *entry; + int ret; + + DRM_DEBUG("\n"); +@@ -187,13 +194,12 @@ static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type) + goto err_debugfs; + + /* replace NULL with @minor so lookups will succeed from now on */ +- if (minor->type == DRM_MINOR_ACCEL) { +- accel_minor_replace(minor, minor->index); +- } else { +- spin_lock_irqsave(&drm_minor_lock, flags); +- idr_replace(&drm_minors_idr, minor, minor->index); +- spin_unlock_irqrestore(&drm_minor_lock, flags); ++ entry = xa_store(drm_minor_get_xa(type), minor->index, minor, 
GFP_KERNEL); ++ if (xa_is_err(entry)) { ++ ret = xa_err(entry); ++ goto err_debugfs; + } ++ WARN_ON(entry); + + DRM_DEBUG("new minor registered %d\n", minor->index); + return 0; +@@ -206,20 +212,13 @@ static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type) + static void drm_minor_unregister(struct drm_device *dev, enum drm_minor_type type) + { + struct drm_minor *minor; +- unsigned long flags; + + minor = *drm_minor_get_slot(dev, type); + if (!minor || !device_is_registered(minor->kdev)) + return; + + /* replace @minor with NULL so lookups will fail from now on */ +- if (minor->type == DRM_MINOR_ACCEL) { +- accel_minor_replace(NULL, minor->index); +- } else { +- spin_lock_irqsave(&drm_minor_lock, flags); +- idr_replace(&drm_minors_idr, NULL, minor->index); +- spin_unlock_irqrestore(&drm_minor_lock, flags); +- } ++ xa_store(drm_minor_get_xa(type), minor->index, NULL, GFP_KERNEL); + + device_del(minor->kdev); + dev_set_drvdata(minor->kdev, NULL); /* safety belt */ +@@ -235,16 +234,15 @@ static void drm_minor_unregister(struct drm_device *dev, enum drm_minor_type typ + * minor->dev pointer will stay valid! However, the device may get unplugged and + * unregistered while you hold the minor. + */ +-struct drm_minor *drm_minor_acquire(unsigned int minor_id) ++struct drm_minor *drm_minor_acquire(struct xarray *minor_xa, unsigned int minor_id) + { + struct drm_minor *minor; +- unsigned long flags; + +- spin_lock_irqsave(&drm_minor_lock, flags); +- minor = idr_find(&drm_minors_idr, minor_id); ++ xa_lock(minor_xa); ++ minor = xa_load(minor_xa, minor_id); + if (minor) + drm_dev_get(minor->dev); +- spin_unlock_irqrestore(&drm_minor_lock, flags); ++ xa_unlock(minor_xa); + + if (!minor) { + return ERR_PTR(-ENODEV); +@@ -1038,7 +1036,7 @@ static int drm_stub_open(struct inode *inode, struct file *filp) + + DRM_DEBUG("\n"); + +- minor = drm_minor_acquire(iminor(inode)); ++ minor = drm_minor_acquire(&drm_minors_xa, iminor(inode)); + if (IS_ERR(minor)) + return PTR_ERR(minor); + +@@ -1073,7 +1071,7 @@ static void drm_core_exit(void) + unregister_chrdev(DRM_MAJOR, "drm"); + debugfs_remove(drm_debugfs_root); + drm_sysfs_destroy(); +- idr_destroy(&drm_minors_idr); ++ WARN_ON(!xa_empty(&drm_minors_xa)); + drm_connector_ida_destroy(); + } + +@@ -1082,7 +1080,6 @@ static int __init drm_core_init(void) + int ret; + + drm_connector_ida_init(); +- idr_init(&drm_minors_idr); + drm_memcpy_init_early(); + + ret = drm_sysfs_init(); +diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c +index 639fa6bef69b39..48af0e2960a226 100644 +--- a/drivers/gpu/drm/drm_file.c ++++ b/drivers/gpu/drm/drm_file.c +@@ -413,7 +413,7 @@ int drm_open(struct inode *inode, struct file *filp) + int retcode; + int need_setup = 0; + +- minor = drm_minor_acquire(iminor(inode)); ++ minor = drm_minor_acquire(&drm_minors_xa, iminor(inode)); + if (IS_ERR(minor)) + return PTR_ERR(minor); + +diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h +index ba12acd551390b..0ef5fc2a61f194 100644 +--- a/drivers/gpu/drm/drm_internal.h ++++ b/drivers/gpu/drm/drm_internal.h +@@ -77,10 +77,6 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv); + void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, + uint32_t handle); + +-/* drm_drv.c */ +-struct drm_minor *drm_minor_acquire(unsigned int minor_id); +-void drm_minor_release(struct drm_minor *minor); +- + /* drm_managed.c */ + void drm_managed_release(struct drm_device *dev); + void 
drmm_add_final_kfree(struct drm_device *dev, void *container); +diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c +index 51f9c2db403e75..f20b864c1bb201 100644 +--- a/drivers/hwmon/asus-ec-sensors.c ++++ b/drivers/hwmon/asus-ec-sensors.c +@@ -402,7 +402,7 @@ static const struct ec_board_info board_info_strix_b550_i_gaming = { + + static const struct ec_board_info board_info_strix_x570_e_gaming = { + .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | +- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM | ++ SENSOR_TEMP_T_SENSOR | + SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU | + SENSOR_IN_CPU_CORE, + .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX, +diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +index a9bafa96e2f926..6fecfe4cd08041 100644 +--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c ++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +@@ -744,6 +744,7 @@ static void mcp251xfd_chip_stop(struct mcp251xfd_priv *priv, + + mcp251xfd_chip_interrupts_disable(priv); + mcp251xfd_chip_rx_int_disable(priv); ++ mcp251xfd_timestamp_stop(priv); + mcp251xfd_chip_sleep(priv); + } + +@@ -763,6 +764,8 @@ static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv) + if (err) + goto out_chip_stop; + ++ mcp251xfd_timestamp_start(priv); ++ + err = mcp251xfd_set_bittiming(priv); + if (err) + goto out_chip_stop; +@@ -791,7 +794,7 @@ static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv) + + return 0; + +- out_chip_stop: ++out_chip_stop: + mcp251xfd_dump(priv); + mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED); + +@@ -1576,7 +1579,7 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id) + handled = IRQ_HANDLED; + } while (1); + +- out_fail: ++out_fail: + can_rx_offload_threaded_irq_finish(&priv->offload); + + netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n", +@@ -1610,11 +1613,12 @@ static int mcp251xfd_open(struct net_device *ndev) + if (err) + goto out_mcp251xfd_ring_free; + ++ mcp251xfd_timestamp_init(priv); ++ + err = mcp251xfd_chip_start(priv); + if (err) + goto out_transceiver_disable; + +- mcp251xfd_timestamp_init(priv); + clear_bit(MCP251XFD_FLAGS_DOWN, priv->flags); + can_rx_offload_enable(&priv->offload); + +@@ -1641,22 +1645,21 @@ static int mcp251xfd_open(struct net_device *ndev) + + return 0; + +- out_free_irq: ++out_free_irq: + free_irq(spi->irq, priv); +- out_destroy_workqueue: ++out_destroy_workqueue: + destroy_workqueue(priv->wq); +- out_can_rx_offload_disable: ++out_can_rx_offload_disable: + can_rx_offload_disable(&priv->offload); + set_bit(MCP251XFD_FLAGS_DOWN, priv->flags); +- mcp251xfd_timestamp_stop(priv); +- out_transceiver_disable: ++out_transceiver_disable: + mcp251xfd_transceiver_disable(priv); +- out_mcp251xfd_ring_free: ++out_mcp251xfd_ring_free: + mcp251xfd_ring_free(priv); +- out_pm_runtime_put: ++out_pm_runtime_put: + mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED); + pm_runtime_put(ndev->dev.parent); +- out_close_candev: ++out_close_candev: + close_candev(ndev); + + return err; +@@ -1674,7 +1677,6 @@ static int mcp251xfd_stop(struct net_device *ndev) + free_irq(ndev->irq, priv); + destroy_workqueue(priv->wq); + can_rx_offload_disable(&priv->offload); +- mcp251xfd_timestamp_stop(priv); + mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED); + mcp251xfd_transceiver_disable(priv); + mcp251xfd_ring_free(priv); +@@ -1820,9 +1822,9 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, u32 *dev_id, + *effective_speed_hz_slow = xfer[0].effective_speed_hz; + *effective_speed_hz_fast = 
xfer[1].effective_speed_hz; + +- out_kfree_buf_tx: ++out_kfree_buf_tx: + kfree(buf_tx); +- out_kfree_buf_rx: ++out_kfree_buf_rx: + kfree(buf_rx); + + return err; +@@ -1936,13 +1938,13 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv) + + return 0; + +- out_unregister_candev: ++out_unregister_candev: + unregister_candev(ndev); +- out_chip_sleep: ++out_chip_sleep: + mcp251xfd_chip_sleep(priv); +- out_runtime_disable: ++out_runtime_disable: + pm_runtime_disable(ndev->dev.parent); +- out_runtime_put_noidle: ++out_runtime_put_noidle: + pm_runtime_put_noidle(ndev->dev.parent); + mcp251xfd_clks_and_vdd_disable(priv); + +@@ -2162,9 +2164,9 @@ static int mcp251xfd_probe(struct spi_device *spi) + + return 0; + +- out_can_rx_offload_del: ++out_can_rx_offload_del: + can_rx_offload_del(&priv->offload); +- out_free_candev: ++out_free_candev: + spi->max_speed_hz = priv->spi_max_speed_hz_orig; + + free_candev(ndev); +diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c +index 004eaf96262bfd..050321345304be 100644 +--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c ++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c +@@ -94,7 +94,7 @@ static void mcp251xfd_dump_registers(const struct mcp251xfd_priv *priv, + kfree(buf); + } + +- out: ++out: + mcp251xfd_dump_header(iter, MCP251XFD_DUMP_OBJECT_TYPE_REG, reg); + } + +diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c +index 92b7bc7f14b9eb..65150e76200720 100644 +--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c ++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c +@@ -397,7 +397,7 @@ mcp251xfd_regmap_crc_read(void *context, + + return err; + } +- out: ++out: + memcpy(val_buf, buf_rx->data, val_len); + + return 0; +diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c +index f72582d4d3e8e2..83c18035b2a24d 100644 +--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c ++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c +@@ -290,7 +290,7 @@ int mcp251xfd_ring_init(struct mcp251xfd_priv *priv) + const struct mcp251xfd_rx_ring *rx_ring; + u16 base = 0, ram_used; + u8 fifo_nr = 1; +- int i; ++ int err = 0, i; + + netdev_reset_queue(priv->ndev); + +@@ -386,10 +386,18 @@ int mcp251xfd_ring_init(struct mcp251xfd_priv *priv) + netdev_err(priv->ndev, + "Error during ring configuration, using more RAM (%u bytes) than available (%u bytes).\n", + ram_used, MCP251XFD_RAM_SIZE); +- return -ENOMEM; ++ err = -ENOMEM; + } + +- return 0; ++ if (priv->tx_obj_num_coalesce_irq && ++ priv->tx_obj_num_coalesce_irq * 2 != priv->tx->obj_num) { ++ netdev_err(priv->ndev, ++ "Error during ring configuration, number of TEF coalescing buffers (%u) must be half of TEF buffers (%u).\n", ++ priv->tx_obj_num_coalesce_irq, priv->tx->obj_num); ++ err = -EINVAL; ++ } ++ ++ return err; + } + + void mcp251xfd_ring_free(struct mcp251xfd_priv *priv) +diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c +index 3886476a8f8efb..f732556d233a7b 100644 +--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c ++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c +@@ -219,7 +219,7 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv) + total_frame_len += frame_len; + } + +- out_netif_wake_queue: ++out_netif_wake_queue: + len = i; /* number of handled goods TEFs */ + if (len) { + struct mcp251xfd_tef_ring *ring = priv->tef; +diff --git 
a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c +index 1db99aabe85c56..202ca0d24d03b9 100644 +--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c ++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c +@@ -48,9 +48,12 @@ void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv) + cc->shift = 1; + cc->mult = clocksource_hz2mult(priv->can.clock.freq, cc->shift); + +- timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns()); +- + INIT_DELAYED_WORK(&priv->timestamp, mcp251xfd_timestamp_work); ++} ++ ++void mcp251xfd_timestamp_start(struct mcp251xfd_priv *priv) ++{ ++ timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns()); + schedule_delayed_work(&priv->timestamp, + MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ); + } +diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h +index 991662fbba42e8..dcbbd2b2fae827 100644 +--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h ++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h +@@ -957,6 +957,7 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv); + int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv); + int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv); + void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv); ++void mcp251xfd_timestamp_start(struct mcp251xfd_priv *priv); + void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv); + + void mcp251xfd_tx_obj_write_sync(struct work_struct *work); +diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c +index 9135b918dd4907..848e41a4b1dbb1 100644 +--- a/drivers/net/ethernet/faraday/ftgmac100.c ++++ b/drivers/net/ethernet/faraday/ftgmac100.c +@@ -572,7 +572,7 @@ static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed) + (*processed)++; + return true; + +- drop: ++drop: + /* Clean rxdes0 (which resets own bit) */ + rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask); + priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer); +@@ -656,6 +656,11 @@ static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv) + ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat); + txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask); + ++ /* Ensure the descriptor config is visible before setting the tx ++ * pointer. ++ */ ++ smp_wmb(); ++ + priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv, pointer); + + return true; +@@ -809,6 +814,11 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb, + dma_wmb(); + first->txdes0 = cpu_to_le32(f_ctl_stat); + ++ /* Ensure the descriptor config is visible before setting the tx ++ * pointer. ++ */ ++ smp_wmb(); ++ + /* Update next TX pointer */ + priv->tx_pointer = pointer; + +@@ -829,7 +839,7 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb, + + return NETDEV_TX_OK; + +- dma_err: ++dma_err: + if (net_ratelimit()) + netdev_err(netdev, "map tx fragment failed\n"); + +@@ -851,7 +861,7 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb, + * last fragment, so we know ftgmac100_free_tx_packet() + * hasn't freed the skb yet. 
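/*
 * The smp_wmb() calls added to ftgmac100 above follow the classic
 * publish pattern: every store that fills in a descriptor must be
 * visible before the store that makes it reachable (the ring pointer).
 * A generic sketch of both sides with illustrative names (sketch_ring,
 * sketch_desc and friends are not from the driver):
 */
struct sketch_desc { u32 addr, len; };
struct sketch_ring { struct sketch_desc desc[64]; u32 tail; };

static void sketch_publish(struct sketch_ring *r, u32 slot, u32 addr, u32 len)
{
	r->desc[slot].addr = addr;
	r->desc[slot].len  = len;
	smp_wmb();			/* payload before publish */
	WRITE_ONCE(r->tail, slot);
}

static bool sketch_consume(struct sketch_ring *r, u32 head)
{
	if (READ_ONCE(r->tail) == head)
		return false;		/* nothing published yet */
	smp_rmb();			/* pairs with the producer's smp_wmb() */
	/* safe to read r->desc[head] now */
	return true;
}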
+ */ +- drop: ++drop: + /* Drop the packet */ + dev_kfree_skb_any(skb); + netdev->stats.tx_dropped++; +@@ -1344,7 +1354,7 @@ static void ftgmac100_reset(struct ftgmac100 *priv) + ftgmac100_init_all(priv, true); + + netdev_dbg(netdev, "Reset done !\n"); +- bail: ++bail: + if (priv->mii_bus) + mutex_unlock(&priv->mii_bus->mdio_lock); + if (netdev->phydev) +@@ -1543,15 +1553,15 @@ static int ftgmac100_open(struct net_device *netdev) + + return 0; + +- err_ncsi: ++err_ncsi: + napi_disable(&priv->napi); + netif_stop_queue(netdev); +- err_alloc: ++err_alloc: + ftgmac100_free_buffers(priv); + free_irq(netdev->irq, netdev); +- err_irq: ++err_irq: + netif_napi_del(&priv->napi); +- err_hw: ++err_hw: + iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); + ftgmac100_free_rings(priv); + return err; +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +index 62aee4965021da..2a408e1ce06ec1 100644 +--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c ++++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +@@ -3206,7 +3206,7 @@ void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, + { + int ret __maybe_unused = 0; + +- if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) ++ if (!iwl_trans_fw_running(fwrt->trans)) + return; + + if (fw_has_capa(&fwrt->fw->ucode_capa, +diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +index 9dcc1506bd0b00..e8f48cb8d2da19 100644 +--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h ++++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +@@ -1554,8 +1554,8 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync) + + /* prevent double restarts due to the same erroneous FW */ + if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) { +- iwl_op_mode_nic_error(trans->op_mode, sync); + trans->state = IWL_TRANS_NO_FW; ++ iwl_op_mode_nic_error(trans->op_mode, sync); + } + } + +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +index f973efbbc3795e..d2daea3b1f38ad 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +@@ -5589,6 +5589,10 @@ static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop) + int i; + + if (!iwl_mvm_has_new_tx_api(mvm)) { ++ /* we can't ask the firmware anything if it is dead */ ++ if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, ++ &mvm->status)) ++ return; + if (drop) { + mutex_lock(&mvm->mutex); + iwl_mvm_flush_tx_path(mvm, +@@ -5673,8 +5677,11 @@ void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + + /* this can take a while, and we may need/want other operations + * to succeed while doing this, so do it without the mutex held ++ * If the firmware is dead, this can't work... 
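/*
 * The iwlwifi guards added above all encode one rule: once a firmware
 * restart has been requested, paths that would command the firmware or
 * wait on its TX queues must bail out early, because no response will
 * ever arrive. Condensed from the hunks:
 */
if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status))
	return;	/* firmware is dead: skip the flush/wait, restart cleans up */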
+ */ +- if (!drop && !iwl_mvm_has_new_tx_api(mvm)) ++ if (!drop && !iwl_mvm_has_new_tx_api(mvm) && ++ !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, ++ &mvm->status)) + iwl_trans_wait_tx_queues_empty(mvm->trans, msk); + } + +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +index 945524470a1e93..b2cf5aeff7e3cf 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +@@ -1418,6 +1418,8 @@ void iwl_mvm_stop_device(struct iwl_mvm *mvm) + + clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); + ++ iwl_mvm_pause_tcm(mvm, false); ++ + iwl_fw_dbg_stop_sync(&mvm->fwrt); + iwl_trans_stop_device(mvm->trans); + iwl_free_fw_paging(&mvm->fwrt); +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +index 9ca90c0806c0f1..626620cd892f00 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +@@ -48,6 +48,8 @@ + /* Number of iterations on the channel for mei filtered scan */ + #define IWL_MEI_SCAN_NUM_ITER 5U + ++#define WFA_TPC_IE_LEN 9 ++ + struct iwl_mvm_scan_timing_params { + u32 suspend_time; + u32 max_out_time; +@@ -296,8 +298,8 @@ static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm) + + max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE; + +- /* we create the 802.11 header and SSID element */ +- max_probe_len -= 24 + 2; ++ /* we create the 802.11 header SSID element and WFA TPC element */ ++ max_probe_len -= 24 + 2 + WFA_TPC_IE_LEN; + + /* DS parameter set element is added on 2.4GHZ band if required */ + if (iwl_mvm_rrm_scan_needed(mvm)) +@@ -724,8 +726,6 @@ static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies, + return newpos; + } + +-#define WFA_TPC_IE_LEN 9 +- + static void iwl_mvm_add_tpc_report_ie(u8 *pos) + { + pos[0] = WLAN_EID_VENDOR_SPECIFIC; +@@ -830,8 +830,8 @@ static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids, + return ((n_ssids <= PROBE_OPTION_MAX) && + (n_channels <= mvm->fw->ucode_capa.n_scan_channels) & + (ies->common_ie_len + +- ies->len[NL80211_BAND_2GHZ] + +- ies->len[NL80211_BAND_5GHZ] <= ++ ies->len[NL80211_BAND_2GHZ] + ies->len[NL80211_BAND_5GHZ] + ++ ies->len[NL80211_BAND_6GHZ] <= + iwl_mvm_max_scan_ie_fw_cmd_room(mvm))); + } + +@@ -3118,18 +3118,16 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, + params.n_channels = j; + } + +- if (non_psc_included && +- !iwl_mvm_scan_fits(mvm, req->n_ssids, ies, params.n_channels)) { +- kfree(params.channels); +- return -ENOBUFS; ++ if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, params.n_channels)) { ++ ret = -ENOBUFS; ++ goto out; + } + + uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, ¶ms, type); +- +- if (non_psc_included) +- kfree(params.channels); +- if (uid < 0) +- return uid; ++ if (uid < 0) { ++ ret = uid; ++ goto out; ++ } + + ret = iwl_mvm_send_cmd(mvm, &hcmd); + if (!ret) { +@@ -3146,6 +3144,9 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, + mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; + } + ++out: ++ if (non_psc_included) ++ kfree(params.channels); + return ret; + } + +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c +index fa4a1454686012..9be41673650eee 100644 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c +@@ -68,7 +68,8 @@ iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans 
*trans, + } + break; + default: +- IWL_ERR(trans, "WRT: Invalid buffer destination\n"); ++ IWL_DEBUG_FW(trans, "WRT: Invalid buffer destination (%d)\n", ++ le32_to_cpu(fw_mon_cfg->buf_location)); + } + out: + if (dbg_flags) +diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h +index d2b6975e71fbca..799f8a2bb0b4f1 100644 +--- a/drivers/nvme/host/nvme.h ++++ b/drivers/nvme/host/nvme.h +@@ -88,6 +88,11 @@ enum nvme_quirks { + */ + NVME_QUIRK_NO_DEEPEST_PS = (1 << 5), + ++ /* ++ * Problems seen with concurrent commands ++ */ ++ NVME_QUIRK_QDEPTH_ONE = (1 << 6), ++ + /* + * Set MEDIUM priority on SQ creation + */ +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c +index 7fc1ab4d9e7d85..32b5cc76a0223c 100644 +--- a/drivers/nvme/host/pci.c ++++ b/drivers/nvme/host/pci.c +@@ -2526,15 +2526,8 @@ static int nvme_pci_enable(struct nvme_dev *dev) + else + dev->io_sqes = NVME_NVM_IOSQES; + +- /* +- * Temporary fix for the Apple controller found in the MacBook8,1 and +- * some MacBook7,1 to avoid controller resets and data loss. +- */ +- if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { ++ if (dev->ctrl.quirks & NVME_QUIRK_QDEPTH_ONE) { + dev->q_depth = 2; +- dev_warn(dev->ctrl.device, "detected Apple NVMe controller, " +- "set queue depth=%u to work around controller resets\n", +- dev->q_depth); + } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG && + (pdev->device == 0xa821 || pdev->device == 0xa822) && + NVME_CAP_MQES(dev->ctrl.cap) == 0) { +@@ -3399,6 +3392,8 @@ static const struct pci_device_id nvme_id_table[] = { + NVME_QUIRK_BOGUS_NID, }, + { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, ++ { PCI_DEVICE(0x1217, 0x8760), /* O2 Micro 64GB Steam Deck */ ++ .driver_data = NVME_QUIRK_QDEPTH_ONE }, + { PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */ + .driver_data = NVME_QUIRK_NO_DEEPEST_PS | + NVME_QUIRK_BOGUS_NID, }, +@@ -3531,7 +3526,12 @@ static const struct pci_device_id nvme_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02), + .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001), +- .driver_data = NVME_QUIRK_SINGLE_VECTOR }, ++ /* ++ * Fix for the Apple controller found in the MacBook8,1 and ++ * some MacBook7,1 to avoid controller resets and data loss. 
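/*
 * A sketch of how the NVME_QUIRK_QDEPTH_ONE plumbing above fits
 * together: the PCI ID table entry carries flag bits in .driver_data,
 * the core copies them into ctrl.quirks, and nvme_pci_enable() keys the
 * workaround off the bit. A queue depth of 2 leaves a single usable
 * submission slot (one entry must stay empty to tell "full" from
 * "empty"), so commands are effectively serialized on these devices.
 */
if (dev->ctrl.quirks & NVME_QUIRK_QDEPTH_ONE)
	dev->q_depth = 2;	/* at most one command in flight */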
++ */ ++ .driver_data = NVME_QUIRK_SINGLE_VECTOR | ++ NVME_QUIRK_QDEPTH_ONE }, + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005), + .driver_data = NVME_QUIRK_SINGLE_VECTOR | +diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c +index 608f55c5ba5fe6..ad30fd47a4bb0b 100644 +--- a/drivers/pinctrl/pinctrl-at91.c ++++ b/drivers/pinctrl/pinctrl-at91.c +@@ -1410,8 +1410,11 @@ static int at91_pinctrl_probe(struct platform_device *pdev) + + /* We will handle a range of GPIO pins */ + for (i = 0; i < gpio_banks; i++) +- if (gpio_chips[i]) ++ if (gpio_chips[i]) { + pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range); ++ gpiochip_add_pin_range(&gpio_chips[i]->chip, dev_name(info->pctl->dev), 0, ++ gpio_chips[i]->range.pin_base, gpio_chips[i]->range.npins); ++ } + + dev_info(dev, "initialized AT91 pinctrl driver\n"); + +diff --git a/drivers/platform/x86/x86-android-tablets/dmi.c b/drivers/platform/x86/x86-android-tablets/dmi.c +index 5d6c12494f082a..0c9d9caf074cb6 100644 +--- a/drivers/platform/x86/x86-android-tablets/dmi.c ++++ b/drivers/platform/x86/x86-android-tablets/dmi.c +@@ -122,7 +122,6 @@ const struct dmi_system_id x86_android_tablet_ids[] __initconst = { + /* Lenovo Yoga Tab 3 Pro YT3-X90F */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), +- DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"), + }, + .driver_data = (void *)&lenovo_yt3_info, +diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c +index 9d3e102f1a76b6..3dfe45ac300aa9 100644 +--- a/drivers/powercap/intel_rapl_common.c ++++ b/drivers/powercap/intel_rapl_common.c +@@ -1280,6 +1280,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = { + + X86_MATCH_VENDOR_FAM(AMD, 0x17, &rapl_defaults_amd), + X86_MATCH_VENDOR_FAM(AMD, 0x19, &rapl_defaults_amd), ++ X86_MATCH_VENDOR_FAM(AMD, 0x1A, &rapl_defaults_amd), + X86_MATCH_VENDOR_FAM(HYGON, 0x18, &rapl_defaults_amd), + {} + }; +diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c +index c305d16cfae9aa..0166f86c7b71a0 100644 +--- a/drivers/scsi/lpfc/lpfc_bsg.c ++++ b/drivers/scsi/lpfc/lpfc_bsg.c +@@ -5409,7 +5409,7 @@ lpfc_get_cgnbuf_info(struct bsg_job *job) + struct get_cgnbuf_info_req *cgnbuf_req; + struct lpfc_cgn_info *cp; + uint8_t *cgn_buff; +- int size, cinfosz; ++ size_t size, cinfosz; + int rc = 0; + + if (job->request_len < sizeof(struct fc_bsg_request) + +diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c +index aac41bd05f98f8..2fb8d4e55c7773 100644 +--- a/drivers/spi/spi-bcm63xx.c ++++ b/drivers/spi/spi-bcm63xx.c +@@ -472,6 +472,7 @@ static const struct of_device_id bcm63xx_spi_of_match[] = { + { .compatible = "brcm,bcm6358-spi", .data = &bcm6358_spi_reg_offsets }, + { }, + }; ++MODULE_DEVICE_TABLE(of, bcm63xx_spi_of_match); + + static int bcm63xx_spi_probe(struct platform_device *pdev) + { +diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c +index b97206d47ec6d9..16bb4fc3a4ba9f 100644 +--- a/drivers/spi/spidev.c ++++ b/drivers/spi/spidev.c +@@ -706,6 +706,7 @@ static struct class *spidev_class; + static const struct spi_device_id spidev_spi_ids[] = { + { .name = "bh2228fv" }, + { .name = "dh2228fv" }, ++ { .name = "jg10309-01" }, + { .name = "ltc2488" }, + { .name = "sx1301" }, + { .name = "bk4" }, +@@ -735,6 +736,7 @@ static int spidev_of_check(struct device *dev) + static const struct of_device_id spidev_dt_ids[] = { + { .compatible = "cisco,spi-petra", 
.data = &spidev_of_check }, + { .compatible = "dh,dhcom-board", .data = &spidev_of_check }, ++ { .compatible = "elgin,jg10309-01", .data = &spidev_of_check }, + { .compatible = "lineartechnology,ltc2488", .data = &spidev_of_check }, + { .compatible = "lwn,bk4", .data = &spidev_of_check }, + { .compatible = "menlo,m53cpld", .data = &spidev_of_check }, +diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c +index 311007b1d90465..c2e666e82857c1 100644 +--- a/drivers/usb/class/usbtmc.c ++++ b/drivers/usb/class/usbtmc.c +@@ -754,7 +754,7 @@ static struct urb *usbtmc_create_urb(void) + if (!urb) + return NULL; + +- dmabuf = kmalloc(bufsize, GFP_KERNEL); ++ dmabuf = kzalloc(bufsize, GFP_KERNEL); + if (!dmabuf) { + usb_free_urb(urb); + return NULL; +diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c +index d93f5d58455782..8e327fcb222f73 100644 +--- a/drivers/usb/serial/pl2303.c ++++ b/drivers/usb/serial/pl2303.c +@@ -118,6 +118,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) }, + { USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) }, + { USB_DEVICE(IBM_VENDOR_ID, IBM_PRODUCT_ID) }, ++ { USB_DEVICE(MACROSILICON_VENDOR_ID, MACROSILICON_MS3020_PRODUCT_ID) }, + { } /* Terminating entry */ + }; + +diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h +index 732f9b13ad5d59..d60eda7f6edaf8 100644 +--- a/drivers/usb/serial/pl2303.h ++++ b/drivers/usb/serial/pl2303.h +@@ -171,3 +171,7 @@ + /* Allied Telesis VT-Kit3 */ + #define AT_VENDOR_ID 0x0caa + #define AT_VTKIT3_PRODUCT_ID 0x3001 ++ ++/* Macrosilicon MS3020 */ ++#define MACROSILICON_VENDOR_ID 0x345f ++#define MACROSILICON_MS3020_PRODUCT_ID 0x3020 +diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c +index b562cfef888adf..29d53d1d1476bd 100644 +--- a/fs/ocfs2/xattr.c ++++ b/fs/ocfs2/xattr.c +@@ -1062,13 +1062,13 @@ ssize_t ocfs2_listxattr(struct dentry *dentry, + return i_ret + b_ret; + } + +-static int ocfs2_xattr_find_entry(int name_index, ++static int ocfs2_xattr_find_entry(struct inode *inode, int name_index, + const char *name, + struct ocfs2_xattr_search *xs) + { + struct ocfs2_xattr_entry *entry; + size_t name_len; +- int i, cmp = 1; ++ int i, name_offset, cmp = 1; + + if (name == NULL) + return -EINVAL; +@@ -1076,13 +1076,22 @@ static int ocfs2_xattr_find_entry(int name_index, + name_len = strlen(name); + entry = xs->here; + for (i = 0; i < le16_to_cpu(xs->header->xh_count); i++) { ++ if ((void *)entry >= xs->end) { ++ ocfs2_error(inode->i_sb, "corrupted xattr entries"); ++ return -EFSCORRUPTED; ++ } + cmp = name_index - ocfs2_xattr_get_type(entry); + if (!cmp) + cmp = name_len - entry->xe_name_len; +- if (!cmp) +- cmp = memcmp(name, (xs->base + +- le16_to_cpu(entry->xe_name_offset)), +- name_len); ++ if (!cmp) { ++ name_offset = le16_to_cpu(entry->xe_name_offset); ++ if ((xs->base + name_offset + name_len) > xs->end) { ++ ocfs2_error(inode->i_sb, ++ "corrupted xattr entries"); ++ return -EFSCORRUPTED; ++ } ++ cmp = memcmp(name, (xs->base + name_offset), name_len); ++ } + if (cmp == 0) + break; + entry += 1; +@@ -1166,7 +1175,7 @@ static int ocfs2_xattr_ibody_get(struct inode *inode, + xs->base = (void *)xs->header; + xs->here = xs->header->xh_entries; + +- ret = ocfs2_xattr_find_entry(name_index, name, xs); ++ ret = ocfs2_xattr_find_entry(inode, name_index, name, xs); + if (ret) + return ret; + size = le64_to_cpu(xs->here->xe_value_size); +@@ -2698,7 +2707,7 @@ static int ocfs2_xattr_ibody_find(struct inode *inode, + + /* Find the named 
attribute. */ + if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) { +- ret = ocfs2_xattr_find_entry(name_index, name, xs); ++ ret = ocfs2_xattr_find_entry(inode, name_index, name, xs); + if (ret && ret != -ENODATA) + return ret; + xs->not_found = ret; +@@ -2833,7 +2842,7 @@ static int ocfs2_xattr_block_find(struct inode *inode, + xs->end = (void *)(blk_bh->b_data) + blk_bh->b_size; + xs->here = xs->header->xh_entries; + +- ret = ocfs2_xattr_find_entry(name_index, name, xs); ++ ret = ocfs2_xattr_find_entry(inode, name_index, name, xs); + } else + ret = ocfs2_xattr_index_block_find(inode, blk_bh, + name_index, +diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c +index d2307162a2de15..e325e06357ffb7 100644 +--- a/fs/smb/client/connect.c ++++ b/fs/smb/client/connect.c +@@ -656,6 +656,19 @@ allocate_buffers(struct TCP_Server_Info *server) + static bool + server_unresponsive(struct TCP_Server_Info *server) + { ++ /* ++ * If we're in the process of mounting a share or reconnecting a session ++ * and the server abruptly shut down (e.g. socket wasn't closed, packet ++ * had been ACK'ed but no SMB response), don't wait longer than 20s to ++ * negotiate protocol. ++ */ ++ spin_lock(&server->srv_lock); ++ if (server->tcpStatus == CifsInNegotiate && ++ time_after(jiffies, server->lstrp + 20 * HZ)) { ++ spin_unlock(&server->srv_lock); ++ cifs_reconnect(server, false); ++ return true; ++ } + /* + * We need to wait 3 echo intervals to make sure we handle such + * situations right: +@@ -667,7 +680,6 @@ server_unresponsive(struct TCP_Server_Info *server) + * 65s kernel_recvmsg times out, and we see that we haven't gotten + * a response in >60s. + */ +- spin_lock(&server->srv_lock); + if ((server->tcpStatus == CifsGood || + server->tcpStatus == CifsNeedNegotiate) && + (!server->ops->can_echo || server->ops->can_echo(server)) && +diff --git a/include/drm/drm_accel.h b/include/drm/drm_accel.h +index d4955062c77e39..f93e23985f4e4b 100644 +--- a/include/drm/drm_accel.h ++++ b/include/drm/drm_accel.h +@@ -51,11 +51,10 @@ + + #if IS_ENABLED(CONFIG_DRM_ACCEL) + ++extern struct xarray accel_minors_xa; ++ + void accel_core_exit(void); + int accel_core_init(void); +-void accel_minor_remove(int index); +-int accel_minor_alloc(void); +-void accel_minor_replace(struct drm_minor *minor, int index); + void accel_set_device_instance_params(struct device *kdev, int index); + int accel_open(struct inode *inode, struct file *filp); + void accel_debugfs_init(struct drm_minor *minor, int minor_id); +@@ -72,19 +71,6 @@ static inline int __init accel_core_init(void) + return 0; + } + +-static inline void accel_minor_remove(int index) +-{ +-} +- +-static inline int accel_minor_alloc(void) +-{ +- return -EOPNOTSUPP; +-} +- +-static inline void accel_minor_replace(struct drm_minor *minor, int index) +-{ +-} +- + static inline void accel_set_device_instance_params(struct device *kdev, int index) + { + } +diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h +index c8c2a63b9e7e05..cc61f6a2b2ad6e 100644 +--- a/include/drm/drm_file.h ++++ b/include/drm/drm_file.h +@@ -45,6 +45,8 @@ struct drm_printer; + struct device; + struct file; + ++extern struct xarray drm_minors_xa; ++ + /* + * FIXME: Not sure we want to have drm_minor here in the end, but to avoid + * header include loops we need it here for now. 
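/*
 * The server_unresponsive() change above uses the standard overflow-safe
 * jiffies idiom: stamp the time of the last server response (lstrp) and
 * compare against jiffies with time_after(). A generic sketch; the
 * function name is illustrative:
 */
static bool sketch_negotiate_timed_out(unsigned long last_response)
{
	/* true once more than 20 seconds have elapsed since last_response */
	return time_after(jiffies, last_response + 20 * HZ);
}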
+@@ -441,6 +443,9 @@ static inline bool drm_is_accel_client(const struct drm_file *file_priv) + + void drm_file_update_pid(struct drm_file *); + ++struct drm_minor *drm_minor_acquire(struct xarray *minors_xa, unsigned int minor_id); ++void drm_minor_release(struct drm_minor *minor); ++ + int drm_open(struct inode *inode, struct file *filp); + int drm_open_helper(struct file *filp, struct drm_minor *minor); + ssize_t drm_read(struct file *filp, char __user *buffer, +diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h +index 8af2543520b999..1b95c34a4e3d11 100644 +--- a/include/net/netfilter/nf_tables.h ++++ b/include/net/netfilter/nf_tables.h +@@ -297,9 +297,22 @@ struct nft_set_elem { + void *priv; + }; + ++/** ++ * enum nft_iter_type - nftables set iterator type ++ * ++ * @NFT_ITER_READ: read-only iteration over set elements ++ * @NFT_ITER_UPDATE: iteration under mutex to update set element state ++ */ ++enum nft_iter_type { ++ NFT_ITER_UNSPEC, ++ NFT_ITER_READ, ++ NFT_ITER_UPDATE, ++}; ++ + struct nft_set; + struct nft_set_iter { + u8 genmask; ++ enum nft_iter_type type:8; + unsigned int count; + unsigned int skip; + int err; +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c +index 46b02a6ae0a368..415e951e4138a5 100644 +--- a/net/mac80211/tx.c ++++ b/net/mac80211/tx.c +@@ -5311,8 +5311,10 @@ ieee80211_beacon_get_ap(struct ieee80211_hw *hw, + if (beacon->tail) + skb_put_data(skb, beacon->tail, beacon->tail_len); + +- if (ieee80211_beacon_protect(skb, local, sdata, link) < 0) ++ if (ieee80211_beacon_protect(skb, local, sdata, link) < 0) { ++ dev_kfree_skb(skb); + return NULL; ++ } + + ieee80211_beacon_get_finish(hw, vif, link, offs, beacon, skb, + chanctx_conf, csa_off_base); +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c +index fc99a5e91829dd..da5684e3fd08c8 100644 +--- a/net/netfilter/nf_tables_api.c ++++ b/net/netfilter/nf_tables_api.c +@@ -628,6 +628,7 @@ static void nft_map_deactivate(const struct nft_ctx *ctx, struct nft_set *set) + { + struct nft_set_iter iter = { + .genmask = nft_genmask_next(ctx->net), ++ .type = NFT_ITER_UPDATE, + .fn = nft_mapelem_deactivate, + }; + +@@ -5392,6 +5393,7 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, + } + + iter.genmask = nft_genmask_next(ctx->net); ++ iter.type = NFT_ITER_UPDATE; + iter.skip = 0; + iter.count = 0; + iter.err = 0; +@@ -5467,6 +5469,7 @@ static void nft_map_activate(const struct nft_ctx *ctx, struct nft_set *set) + { + struct nft_set_iter iter = { + .genmask = nft_genmask_next(ctx->net), ++ .type = NFT_ITER_UPDATE, + .fn = nft_mapelem_activate, + }; + +@@ -5845,6 +5848,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) + args.skb = skb; + args.reset = reset; + args.iter.genmask = nft_genmask_cur(net); ++ args.iter.type = NFT_ITER_READ; + args.iter.skip = cb->args[0]; + args.iter.count = 0; + args.iter.err = 0; +@@ -7246,6 +7250,7 @@ static int nft_set_flush(struct nft_ctx *ctx, struct nft_set *set, u8 genmask) + { + struct nft_set_iter iter = { + .genmask = genmask, ++ .type = NFT_ITER_UPDATE, + .fn = nft_setelem_flush, + }; + +diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c +index 7ba4b1454db46c..1b9edf2b339373 100644 +--- a/net/netfilter/nft_lookup.c ++++ b/net/netfilter/nft_lookup.c +@@ -217,6 +217,7 @@ static int nft_lookup_validate(const struct nft_ctx *ctx, + return 0; + + iter.genmask = nft_genmask_next(ctx->net); ++ iter.type = NFT_ITER_UPDATE; + iter.skip = 0; + iter.count = 0; + 
iter.err = 0; +diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c +index e4dd7309304840..22407e7e0b51e3 100644 +--- a/net/netfilter/nft_set_pipapo.c ++++ b/net/netfilter/nft_set_pipapo.c +@@ -2037,13 +2037,15 @@ static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set, + struct nft_set_iter *iter) + { + struct nft_pipapo *priv = nft_set_priv(set); +- struct net *net = read_pnet(&set->net); + const struct nft_pipapo_match *m; + const struct nft_pipapo_field *f; + int i, r; + ++ WARN_ON_ONCE(iter->type != NFT_ITER_READ && ++ iter->type != NFT_ITER_UPDATE); ++ + rcu_read_lock(); +- if (iter->genmask == nft_genmask_cur(net)) ++ if (iter->type == NFT_ITER_READ) + m = rcu_dereference(priv->match); + else + m = priv->clone; +diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c +index 765ffd6e06bc41..0a8883a93e8369 100644 +--- a/net/netfilter/nft_socket.c ++++ b/net/netfilter/nft_socket.c +@@ -9,7 +9,8 @@ + + struct nft_socket { + enum nft_socket_keys key:8; +- u8 level; ++ u8 level; /* cgroupv2 level to extract */ ++ u8 level_user; /* cgroupv2 level provided by userspace */ + u8 len; + union { + u8 dreg; +@@ -53,6 +54,28 @@ nft_sock_get_eval_cgroupv2(u32 *dest, struct sock *sk, const struct nft_pktinfo + memcpy(dest, &cgid, sizeof(u64)); + return true; + } ++ ++/* process context only, uses current->nsproxy. */ ++static noinline int nft_socket_cgroup_subtree_level(void) ++{ ++ struct cgroup *cgrp = cgroup_get_from_path("/"); ++ int level; ++ ++ if (IS_ERR(cgrp)) ++ return PTR_ERR(cgrp); ++ ++ level = cgrp->level; ++ ++ cgroup_put(cgrp); ++ ++ if (WARN_ON_ONCE(level > 255)) ++ return -ERANGE; ++ ++ if (WARN_ON_ONCE(level < 0)) ++ return -EINVAL; ++ ++ return level; ++} + #endif + + static struct sock *nft_socket_do_lookup(const struct nft_pktinfo *pkt) +@@ -174,9 +197,10 @@ static int nft_socket_init(const struct nft_ctx *ctx, + case NFT_SOCKET_MARK: + len = sizeof(u32); + break; +-#ifdef CONFIG_CGROUPS ++#ifdef CONFIG_SOCK_CGROUP_DATA + case NFT_SOCKET_CGROUPV2: { + unsigned int level; ++ int err; + + if (!tb[NFTA_SOCKET_LEVEL]) + return -EINVAL; +@@ -185,6 +209,17 @@ static int nft_socket_init(const struct nft_ctx *ctx, + if (level > 255) + return -EOPNOTSUPP; + ++ err = nft_socket_cgroup_subtree_level(); ++ if (err < 0) ++ return err; ++ ++ priv->level_user = level; ++ ++ level += err; ++ /* Implies a giant cgroup tree */ ++ if (WARN_ON_ONCE(level > 255)) ++ return -EOPNOTSUPP; ++ + priv->level = level; + len = sizeof(u64); + break; +@@ -209,7 +244,7 @@ static int nft_socket_dump(struct sk_buff *skb, + if (nft_dump_register(skb, NFTA_SOCKET_DREG, priv->dreg)) + return -1; + if (priv->key == NFT_SOCKET_CGROUPV2 && +- nla_put_be32(skb, NFTA_SOCKET_LEVEL, htonl(priv->level))) ++ nla_put_be32(skb, NFTA_SOCKET_LEVEL, htonl(priv->level_user))) + return -1; + return 0; + } +diff --git a/net/wireless/core.h b/net/wireless/core.h +index c955be6c6daa45..f0a3a231763854 100644 +--- a/net/wireless/core.h ++++ b/net/wireless/core.h +@@ -228,7 +228,6 @@ void cfg80211_register_wdev(struct cfg80211_registered_device *rdev, + static inline void wdev_lock(struct wireless_dev *wdev) + __acquires(wdev) + { +- lockdep_assert_held(&wdev->wiphy->mtx); + mutex_lock(&wdev->mtx); + __acquire(wdev->mtx); + } +@@ -236,16 +235,11 @@ static inline void wdev_lock(struct wireless_dev *wdev) + static inline void wdev_unlock(struct wireless_dev *wdev) + __releases(wdev) + { +- lockdep_assert_held(&wdev->wiphy->mtx); + __release(wdev->mtx); + mutex_unlock(&wdev->mtx); + 
} + +-static inline void ASSERT_WDEV_LOCK(struct wireless_dev *wdev) +-{ +- lockdep_assert_held(&wdev->wiphy->mtx); +- lockdep_assert_held(&wdev->mtx); +-} ++#define ASSERT_WDEV_LOCK(wdev) lockdep_assert_held(&(wdev)->mtx) + + static inline bool cfg80211_has_monitors_only(struct cfg80211_registered_device *rdev) + { +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c +index 82c0d3a3327abe..f030700cd60d75 100644 +--- a/sound/pci/hda/patch_hdmi.c ++++ b/sound/pci/hda/patch_hdmi.c +@@ -4646,6 +4646,7 @@ HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_adlp_hdmi), + HDA_CODEC_ENTRY(0x8086281d, "Meteor Lake HDMI", patch_i915_adlp_hdmi), + HDA_CODEC_ENTRY(0x8086281f, "Raptor Lake P HDMI", patch_i915_adlp_hdmi), + HDA_CODEC_ENTRY(0x80862820, "Lunar Lake HDMI", patch_i915_adlp_hdmi), ++HDA_CODEC_ENTRY(0x80862822, "Panther Lake HDMI", patch_i915_adlp_hdmi), + HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi), + HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi), + HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_i915_byt_hdmi), +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 6661fed2c2bbf1..130508f5ad9c8a 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -4931,6 +4931,30 @@ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec, + } + } + ++static void alc_hp_mute_disable(struct hda_codec *codec, unsigned int delay) ++{ ++ if (delay <= 0) ++ delay = 75; ++ snd_hda_codec_write(codec, 0x21, 0, ++ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); ++ msleep(delay); ++ snd_hda_codec_write(codec, 0x21, 0, ++ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); ++ msleep(delay); ++} ++ ++static void alc_hp_enable_unmute(struct hda_codec *codec, unsigned int delay) ++{ ++ if (delay <= 0) ++ delay = 75; ++ snd_hda_codec_write(codec, 0x21, 0, ++ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT); ++ msleep(delay); ++ snd_hda_codec_write(codec, 0x21, 0, ++ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE); ++ msleep(delay); ++} ++ + static const struct coef_fw alc225_pre_hsmode[] = { + UPDATE_COEF(0x4a, 1<<8, 0), + UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), +@@ -5032,6 +5056,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec) + case 0x10ec0236: + case 0x10ec0256: + case 0x19e58326: ++ alc_hp_mute_disable(codec, 75); + alc_process_coef_fw(codec, coef0256); + break; + case 0x10ec0234: +@@ -5066,6 +5091,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec) + case 0x10ec0295: + case 0x10ec0289: + case 0x10ec0299: ++ alc_hp_mute_disable(codec, 75); + alc_process_coef_fw(codec, alc225_pre_hsmode); + alc_process_coef_fw(codec, coef0225); + break; +@@ -5291,6 +5317,7 @@ static void alc_headset_mode_default(struct hda_codec *codec) + case 0x10ec0299: + alc_process_coef_fw(codec, alc225_pre_hsmode); + alc_process_coef_fw(codec, coef0225); ++ alc_hp_enable_unmute(codec, 75); + break; + case 0x10ec0255: + alc_process_coef_fw(codec, coef0255); +@@ -5303,6 +5330,7 @@ static void alc_headset_mode_default(struct hda_codec *codec) + alc_write_coef_idx(codec, 0x45, 0xc089); + msleep(50); + alc_process_coef_fw(codec, coef0256); ++ alc_hp_enable_unmute(codec, 75); + break; + case 0x10ec0234: + case 0x10ec0274: +@@ -5400,6 +5428,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec) + case 0x10ec0256: + case 0x19e58326: + alc_process_coef_fw(codec, coef0256); ++ alc_hp_enable_unmute(codec, 75); + break; + case 0x10ec0234: + case 0x10ec0274: +@@ -5448,6 +5477,7 @@ static void 
alc_headset_mode_ctia(struct hda_codec *codec) + alc_process_coef_fw(codec, coef0225_2); + else + alc_process_coef_fw(codec, coef0225_1); ++ alc_hp_enable_unmute(codec, 75); + break; + case 0x10ec0867: + alc_update_coefex_idx(codec, 0x57, 0x5, 1<<14, 0); +@@ -5515,6 +5545,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec) + case 0x10ec0256: + case 0x19e58326: + alc_process_coef_fw(codec, coef0256); ++ alc_hp_enable_unmute(codec, 75); + break; + case 0x10ec0234: + case 0x10ec0274: +@@ -5552,6 +5583,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec) + case 0x10ec0289: + case 0x10ec0299: + alc_process_coef_fw(codec, coef0225); ++ alc_hp_enable_unmute(codec, 75); + break; + } + codec_dbg(codec, "Headset jack set to Nokia-style headset mode.\n"); +@@ -5620,25 +5652,21 @@ static void alc_determine_headset_type(struct hda_codec *codec) + alc_write_coef_idx(codec, 0x06, 0x6104); + alc_write_coefex_idx(codec, 0x57, 0x3, 0x09a3); + +- snd_hda_codec_write(codec, 0x21, 0, +- AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); +- msleep(80); +- snd_hda_codec_write(codec, 0x21, 0, +- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); +- + alc_process_coef_fw(codec, coef0255); + msleep(300); + val = alc_read_coef_idx(codec, 0x46); + is_ctia = (val & 0x0070) == 0x0070; +- ++ if (!is_ctia) { ++ alc_write_coef_idx(codec, 0x45, 0xe089); ++ msleep(100); ++ val = alc_read_coef_idx(codec, 0x46); ++ if ((val & 0x0070) == 0x0070) ++ is_ctia = false; ++ else ++ is_ctia = true; ++ } + alc_write_coefex_idx(codec, 0x57, 0x3, 0x0da3); + alc_update_coefex_idx(codec, 0x57, 0x5, 1<<14, 0); +- +- snd_hda_codec_write(codec, 0x21, 0, +- AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT); +- msleep(80); +- snd_hda_codec_write(codec, 0x21, 0, +- AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE); + break; + case 0x10ec0234: + case 0x10ec0274: +@@ -5715,12 +5743,6 @@ static void alc_determine_headset_type(struct hda_codec *codec) + case 0x10ec0295: + case 0x10ec0289: + case 0x10ec0299: +- snd_hda_codec_write(codec, 0x21, 0, +- AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); +- msleep(80); +- snd_hda_codec_write(codec, 0x21, 0, +- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); +- + alc_process_coef_fw(codec, alc225_pre_hsmode); + alc_update_coef_idx(codec, 0x67, 0xf000, 0x1000); + val = alc_read_coef_idx(codec, 0x45); +@@ -5737,15 +5759,19 @@ static void alc_determine_headset_type(struct hda_codec *codec) + val = alc_read_coef_idx(codec, 0x46); + is_ctia = (val & 0x00f0) == 0x00f0; + } ++ if (!is_ctia) { ++ alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x38<<10); ++ alc_update_coef_idx(codec, 0x49, 3<<8, 1<<8); ++ msleep(100); ++ val = alc_read_coef_idx(codec, 0x46); ++ if ((val & 0x00f0) == 0x00f0) ++ is_ctia = false; ++ else ++ is_ctia = true; ++ } + alc_update_coef_idx(codec, 0x4a, 7<<6, 7<<6); + alc_update_coef_idx(codec, 0x4a, 3<<4, 3<<4); + alc_update_coef_idx(codec, 0x67, 0xf000, 0x3000); +- +- snd_hda_codec_write(codec, 0x21, 0, +- AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT); +- msleep(80); +- snd_hda_codec_write(codec, 0x21, 0, +- AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE); + break; + case 0x10ec0867: + is_ctia = true; +diff --git a/sound/soc/amd/acp/acp-sof-mach.c b/sound/soc/amd/acp/acp-sof-mach.c +index 354d0fc55299b2..0c5254c52b7945 100644 +--- a/sound/soc/amd/acp/acp-sof-mach.c ++++ b/sound/soc/amd/acp/acp-sof-mach.c +@@ -162,6 +162,8 @@ static const struct platform_device_id board_ids[] = { + }, + { } + }; ++MODULE_DEVICE_TABLE(platform, board_ids); ++ + static struct platform_driver acp_asoc_audio = { + .driver = { + .name = "sof_mach", +diff 
--git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c +index f6c1dbd0ebcf57..248e3bcbf386b0 100644 +--- a/sound/soc/amd/yc/acp6x-mach.c ++++ b/sound/soc/amd/yc/acp6x-mach.c +@@ -353,6 +353,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 15 C7VF"), + } + }, ++ { ++ .driver_data = &acp6x_card, ++ .matches = { ++ DMI_MATCH(DMI_BOARD_VENDOR, "Micro-Star International Co., Ltd."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 17 D7VEK"), ++ } ++ }, + { + .driver_data = &acp6x_card, + .matches = { +diff --git a/sound/soc/au1x/db1200.c b/sound/soc/au1x/db1200.c +index 400eaf9f8b1407..f185711180cb46 100644 +--- a/sound/soc/au1x/db1200.c ++++ b/sound/soc/au1x/db1200.c +@@ -44,6 +44,7 @@ static const struct platform_device_id db1200_pids[] = { + }, + {}, + }; ++MODULE_DEVICE_TABLE(platform, db1200_pids); + + /*------------------------- AC97 PART ---------------------------*/ + +diff --git a/sound/soc/codecs/chv3-codec.c b/sound/soc/codecs/chv3-codec.c +index ab99effa68748d..40020500b1fe89 100644 +--- a/sound/soc/codecs/chv3-codec.c ++++ b/sound/soc/codecs/chv3-codec.c +@@ -26,6 +26,7 @@ static const struct of_device_id chv3_codec_of_match[] = { + { .compatible = "google,chv3-codec", }, + { } + }; ++MODULE_DEVICE_TABLE(of, chv3_codec_of_match); + + static struct platform_driver chv3_codec_platform_driver = { + .driver = { +diff --git a/sound/soc/codecs/tda7419.c b/sound/soc/codecs/tda7419.c +index e187d74a17376f..3914deb060cacf 100644 +--- a/sound/soc/codecs/tda7419.c ++++ b/sound/soc/codecs/tda7419.c +@@ -623,6 +623,7 @@ static const struct of_device_id tda7419_of_match[] = { + { .compatible = "st,tda7419" }, + { }, + }; ++MODULE_DEVICE_TABLE(of, tda7419_of_match); + + static struct i2c_driver tda7419_driver = { + .driver = { +diff --git a/sound/soc/google/chv3-i2s.c b/sound/soc/google/chv3-i2s.c +index 0f65134449066b..462e970b954f10 100644 +--- a/sound/soc/google/chv3-i2s.c ++++ b/sound/soc/google/chv3-i2s.c +@@ -322,6 +322,7 @@ static const struct of_device_id chv3_i2s_of_match[] = { + { .compatible = "google,chv3-i2s" }, + {}, + }; ++MODULE_DEVICE_TABLE(of, chv3_i2s_of_match); + + static struct platform_driver chv3_i2s_driver = { + .probe = chv3_i2s_probe, +diff --git a/sound/soc/intel/common/soc-acpi-intel-cht-match.c b/sound/soc/intel/common/soc-acpi-intel-cht-match.c +index 5e2ec60e2954b2..e4c3492a0c2824 100644 +--- a/sound/soc/intel/common/soc-acpi-intel-cht-match.c ++++ b/sound/soc/intel/common/soc-acpi-intel-cht-match.c +@@ -84,7 +84,6 @@ static const struct dmi_system_id lenovo_yoga_tab3_x90[] = { + /* Lenovo Yoga Tab 3 Pro YT3-X90, codec missing from DSDT */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), +- DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"), + }, + }, +diff --git a/sound/soc/intel/keembay/kmb_platform.c b/sound/soc/intel/keembay/kmb_platform.c +index 6b06b7b5ede869..ffe558ef49220a 100644 +--- a/sound/soc/intel/keembay/kmb_platform.c ++++ b/sound/soc/intel/keembay/kmb_platform.c +@@ -815,6 +815,7 @@ static const struct of_device_id kmb_plat_of_match[] = { + { .compatible = "intel,keembay-tdm", .data = &intel_kmb_tdm_dai}, + {} + }; ++MODULE_DEVICE_TABLE(of, kmb_plat_of_match); + + static int kmb_plat_dai_probe(struct platform_device *pdev) + { +diff --git a/sound/soc/mediatek/mt8188/mt8188-afe-pcm.c b/sound/soc/mediatek/mt8188/mt8188-afe-pcm.c +index 5e14655c5617ed..11f30b183520ff 100644 +--- a/sound/soc/mediatek/mt8188/mt8188-afe-pcm.c 
++++ b/sound/soc/mediatek/mt8188/mt8188-afe-pcm.c +@@ -2748,6 +2748,7 @@ static bool mt8188_is_volatile_reg(struct device *dev, unsigned int reg) + case AFE_ASRC12_NEW_CON9: + case AFE_LRCK_CNT: + case AFE_DAC_MON0: ++ case AFE_DAC_CON0: + case AFE_DL2_CUR: + case AFE_DL3_CUR: + case AFE_DL6_CUR: +diff --git a/sound/soc/sof/mediatek/mt8195/mt8195.c b/sound/soc/sof/mediatek/mt8195/mt8195.c +index b5b4ea854da4b8..94db51d88dda0b 100644 +--- a/sound/soc/sof/mediatek/mt8195/mt8195.c ++++ b/sound/soc/sof/mediatek/mt8195/mt8195.c +@@ -625,6 +625,9 @@ static struct snd_sof_of_mach sof_mt8195_machs[] = { + { + .compatible = "google,tomato", + .sof_tplg_filename = "sof-mt8195-mt6359-rt1019-rt5682.tplg" ++ }, { ++ .compatible = "google,dojo", ++ .sof_tplg_filename = "sof-mt8195-mt6359-max98390-rt5682.tplg" + }, { + .compatible = "mediatek,mt8195", + .sof_tplg_filename = "sof-mt8195.tplg" +diff --git a/tools/hv/Makefile b/tools/hv/Makefile +index fe770e679ae8fe..5643058e2d377b 100644 +--- a/tools/hv/Makefile ++++ b/tools/hv/Makefile +@@ -47,7 +47,7 @@ $(OUTPUT)hv_fcopy_daemon: $(HV_FCOPY_DAEMON_IN) + + clean: + rm -f $(ALL_PROGRAMS) +- find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete ++ find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete -o -name '\.*.cmd' -delete + + install: $(ALL_PROGRAMS) + install -d -m 755 $(DESTDIR)$(sbindir); \ diff --git a/patch/kernel/archive/odroidxu4-6.6/patch-6.6.53-54.patch b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.53-54.patch new file mode 100644 index 000000000000..2f42bcea2352 --- /dev/null +++ b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.53-54.patch @@ -0,0 +1,22341 @@ +diff --git a/.gitignore b/.gitignore +index 0bbae167bf93e9..d1a8ab3f98aaf1 100644 +--- a/.gitignore ++++ b/.gitignore +@@ -135,7 +135,6 @@ GTAGS + # id-utils files + ID + +-*.orig + *~ + \#*# + +diff --git a/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818 b/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818 +index 31dbb390573ff2..c431f0a13cf502 100644 +--- a/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818 ++++ b/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818 +@@ -3,7 +3,7 @@ KernelVersion: + Contact: linux-iio@vger.kernel.org + Description: + Reading this returns the valid values that can be written to the +- on_altvoltage0_mode attribute: ++ filter_mode attribute: + + - auto -> Adjust bandpass filter to track changes in input clock rate. + - manual -> disable/unregister the clock rate notifier / input clock tracking. +diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst +index 357d6cb98161f9..3084f5cf5e40db 100644 +--- a/Documentation/arch/arm64/silicon-errata.rst ++++ b/Documentation/arch/arm64/silicon-errata.rst +@@ -54,6 +54,8 @@ stable kernels. 
+ +----------------+-----------------+-----------------+-----------------------------+ + | Ampere | AmpereOne | AC03_CPU_38 | AMPERE_ERRATUM_AC03_CPU_38 | + +----------------+-----------------+-----------------+-----------------------------+ ++| Ampere | AmpereOne AC04 | AC04_CPU_10 | AMPERE_ERRATUM_AC03_CPU_38 | +++----------------+-----------------+-----------------+-----------------------------+ + +----------------+-----------------+-----------------+-----------------------------+ + | ARM | Cortex-A510 | #2457168 | ARM64_ERRATUM_2457168 | + +----------------+-----------------+-----------------+-----------------------------+ +diff --git a/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml b/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml +index 9790f75fc669ef..fe5145d3b73cf2 100644 +--- a/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml ++++ b/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml +@@ -23,7 +23,6 @@ properties: + - ak8963 + - ak09911 + - ak09912 +- - ak09916 + deprecated: true + + reg: +diff --git a/Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml b/Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml +index 7fd59114548001..902db92da83207 100644 +--- a/Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml ++++ b/Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml +@@ -15,12 +15,19 @@ allOf: + + properties: + compatible: +- enum: +- - nxp,imx8dxl-fspi +- - nxp,imx8mm-fspi +- - nxp,imx8mp-fspi +- - nxp,imx8qxp-fspi +- - nxp,lx2160a-fspi ++ oneOf: ++ - enum: ++ - nxp,imx8dxl-fspi ++ - nxp,imx8mm-fspi ++ - nxp,imx8mp-fspi ++ - nxp,imx8qxp-fspi ++ - nxp,imx8ulp-fspi ++ - nxp,lx2160a-fspi ++ - items: ++ - enum: ++ - nxp,imx93-fspi ++ - nxp,imx95-fspi ++ - const: nxp,imx8mm-fspi + + reg: + items: +diff --git a/Documentation/driver-api/ipmi.rst b/Documentation/driver-api/ipmi.rst +index e224e47b6b0944..dfa021eacd63c4 100644 +--- a/Documentation/driver-api/ipmi.rst ++++ b/Documentation/driver-api/ipmi.rst +@@ -540,7 +540,7 @@ at module load time (for a module) with:: + alerts_broken + + The addresses are normal I2C addresses. The adapter is the string +-name of the adapter, as shown in /sys/class/i2c-adapter/i2c-/name. ++name of the adapter, as shown in /sys/bus/i2c/devices/i2c-/name. + It is *NOT* i2c- itself. Also, the comparison is done ignoring + spaces, so if the name is "This is an I2C chip" you can say + adapter_name=ThisisanI2cchip. This is because it's hard to pass in +diff --git a/Documentation/virt/kvm/locking.rst b/Documentation/virt/kvm/locking.rst +index 3a034db5e55f89..887d9d2fed492b 100644 +--- a/Documentation/virt/kvm/locking.rst ++++ b/Documentation/virt/kvm/locking.rst +@@ -9,7 +9,7 @@ KVM Lock Overview + + The acquisition orders for mutexes are as follows: + +-- cpus_read_lock() is taken outside kvm_lock ++- cpus_read_lock() is taken outside kvm_lock and kvm_usage_lock + + - kvm->lock is taken outside vcpu->mutex + +@@ -24,6 +24,13 @@ The acquisition orders for mutexes are as follows: + are taken on the waiting side when modifying memslots, so MMU notifiers + must not take either kvm->slots_lock or kvm->slots_arch_lock. + ++cpus_read_lock() vs kvm_lock: ++ ++- Taking cpus_read_lock() outside of kvm_lock is problematic, despite that ++ being the official ordering, as it is quite easy to unknowingly trigger ++ cpus_read_lock() while holding kvm_lock. Use caution when walking vm_list, ++ e.g. avoid complex operations when possible. 
++ + For SRCU: + + - ``synchronize_srcu(&kvm->srcu)`` is called inside critical sections +@@ -228,10 +235,17 @@ time it will be set using the Dirty tracking mechanism described above. + :Type: mutex + :Arch: any + :Protects: - vm_list +- - kvm_usage_count ++ ++``kvm_usage_lock`` ++^^^^^^^^^^^^^^^^^^ ++ ++:Type: mutex ++:Arch: any ++:Protects: - kvm_usage_count + - hardware virtualization enable/disable +-:Comment: KVM also disables CPU hotplug via cpus_read_lock() during +- enable/disable. ++:Comment: Exists because using kvm_lock leads to deadlock (see earlier comment ++ on cpus_read_lock() vs kvm_lock). Note, KVM also disables CPU hotplug via ++ cpus_read_lock() when enabling/disabling virtualization. + + ``kvm->mn_invalidate_lock`` + ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +@@ -291,11 +305,12 @@ time it will be set using the Dirty tracking mechanism described above. + wakeup. + + ``vendor_module_lock`` +-^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ++^^^^^^^^^^^^^^^^^^^^^^ + :Type: mutex + :Arch: x86 + :Protects: loading a vendor module (kvm_amd or kvm_intel) +-:Comment: Exists because using kvm_lock leads to deadlock. cpu_hotplug_lock is +- taken outside of kvm_lock, e.g. in KVM's CPU online/offline callbacks, and +- many operations need to take cpu_hotplug_lock when loading a vendor module, +- e.g. updating static calls. ++:Comment: Exists because using kvm_lock leads to deadlock. kvm_lock is taken ++ in notifiers, e.g. __kvmclock_cpufreq_notifier(), that may be invoked while ++ cpu_hotplug_lock is held, e.g. from cpufreq_boost_trigger_state(), and many ++ operations need to take cpu_hotplug_lock when loading a vendor module, e.g. ++ updating static calls. +diff --git a/Makefile b/Makefile +index 0158e14f0dd966..1e382bacd8eac0 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 6 +-SUBLEVEL = 53 ++SUBLEVEL = 54 + EXTRAVERSION = + NAME = Hurr durr I'ma ninja sloth + +diff --git a/arch/arm/boot/dts/microchip/sam9x60.dtsi b/arch/arm/boot/dts/microchip/sam9x60.dtsi +index 73d570a172690c..1705c96f4221e8 100644 +--- a/arch/arm/boot/dts/microchip/sam9x60.dtsi ++++ b/arch/arm/boot/dts/microchip/sam9x60.dtsi +@@ -1312,7 +1312,7 @@ rtt: rtc@fffffe20 { + compatible = "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt"; + reg = <0xfffffe20 0x20>; + interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>; +- clocks = <&clk32k 0>; ++ clocks = <&clk32k 1>; + }; + + pit: timer@fffffe40 { +@@ -1338,7 +1338,7 @@ rtc: rtc@fffffea8 { + compatible = "microchip,sam9x60-rtc", "atmel,at91sam9x5-rtc"; + reg = <0xfffffea8 0x100>; + interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>; +- clocks = <&clk32k 0>; ++ clocks = <&clk32k 1>; + }; + + watchdog: watchdog@ffffff80 { +diff --git a/arch/arm/boot/dts/microchip/sama7g5.dtsi b/arch/arm/boot/dts/microchip/sama7g5.dtsi +index 269e0a3ca269cd..7a95464bb78d83 100644 +--- a/arch/arm/boot/dts/microchip/sama7g5.dtsi ++++ b/arch/arm/boot/dts/microchip/sama7g5.dtsi +@@ -272,7 +272,7 @@ rtt: rtc@e001d020 { + compatible = "microchip,sama7g5-rtt", "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt"; + reg = <0xe001d020 0x30>; + interrupts = ; +- clocks = <&clk32k 0>; ++ clocks = <&clk32k 1>; + }; + + clk32k: clock-controller@e001d050 { +diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts b/arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts +index 875ae699c5cb80..ce9f4c22672939 100644 +--- a/arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts ++++ b/arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts +@@ -366,7 +366,7 @@ MX6UL_PAD_ENET1_RX_ER__PWM8_OUT 0x110b0 + }; + + pinctrl_tsc: tscgrp { +- 
fsl,pin = < ++ fsl,pins = < + MX6UL_PAD_GPIO1_IO01__GPIO1_IO01 0xb0 + MX6UL_PAD_GPIO1_IO02__GPIO1_IO02 0xb0 + MX6UL_PAD_GPIO1_IO03__GPIO1_IO03 0xb0 +diff --git a/arch/arm/boot/dts/nxp/imx/imx7d-zii-rmu2.dts b/arch/arm/boot/dts/nxp/imx/imx7d-zii-rmu2.dts +index 521493342fe972..8f5566027c25a2 100644 +--- a/arch/arm/boot/dts/nxp/imx/imx7d-zii-rmu2.dts ++++ b/arch/arm/boot/dts/nxp/imx/imx7d-zii-rmu2.dts +@@ -350,7 +350,7 @@ MX7D_PAD_SD3_RESET_B__SD3_RESET_B 0x59 + + &iomuxc_lpsr { + pinctrl_enet1_phy_interrupt: enet1phyinterruptgrp { +- fsl,phy = < ++ fsl,pins = < + MX7D_PAD_LPSR_GPIO1_IO02__GPIO1_IO2 0x08 + >; + }; +diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c +index 85a496ddc6197e..e9f72a529b5089 100644 +--- a/arch/arm/mach-ep93xx/clock.c ++++ b/arch/arm/mach-ep93xx/clock.c +@@ -359,7 +359,7 @@ static unsigned long ep93xx_div_recalc_rate(struct clk_hw *hw, + u32 val = __raw_readl(psc->reg); + u8 index = (val & psc->mask) >> psc->shift; + +- if (index > psc->num_div) ++ if (index >= psc->num_div) + return 0; + + return DIV_ROUND_UP_ULL(parent_rate, psc->div[index]); +diff --git a/arch/arm/mach-versatile/platsmp-realview.c b/arch/arm/mach-versatile/platsmp-realview.c +index 5d363385c80192..059d796b26bc8e 100644 +--- a/arch/arm/mach-versatile/platsmp-realview.c ++++ b/arch/arm/mach-versatile/platsmp-realview.c +@@ -66,6 +66,7 @@ static void __init realview_smp_prepare_cpus(unsigned int max_cpus) + return; + } + map = syscon_node_to_regmap(np); ++ of_node_put(np); + if (IS_ERR(map)) { + pr_err("PLATSMP: No syscon regmap\n"); + return; +diff --git a/arch/arm/vfp/vfpinstr.h b/arch/arm/vfp/vfpinstr.h +index 3c7938fd40aad6..32090b0fb250b8 100644 +--- a/arch/arm/vfp/vfpinstr.h ++++ b/arch/arm/vfp/vfpinstr.h +@@ -64,33 +64,37 @@ + + #ifdef CONFIG_AS_VFP_VMRS_FPINST + +-#define fmrx(_vfp_) ({ \ +- u32 __v; \ +- asm(".fpu vfpv2\n" \ +- "vmrs %0, " #_vfp_ \ +- : "=r" (__v) : : "cc"); \ +- __v; \ +- }) +- +-#define fmxr(_vfp_,_var_) \ +- asm(".fpu vfpv2\n" \ +- "vmsr " #_vfp_ ", %0" \ +- : : "r" (_var_) : "cc") ++#define fmrx(_vfp_) ({ \ ++ u32 __v; \ ++ asm volatile (".fpu vfpv2\n" \ ++ "vmrs %0, " #_vfp_ \ ++ : "=r" (__v) : : "cc"); \ ++ __v; \ ++}) ++ ++#define fmxr(_vfp_, _var_) ({ \ ++ asm volatile (".fpu vfpv2\n" \ ++ "vmsr " #_vfp_ ", %0" \ ++ : : "r" (_var_) : "cc"); \ ++}) + + #else + + #define vfpreg(_vfp_) #_vfp_ + +-#define fmrx(_vfp_) ({ \ +- u32 __v; \ +- asm("mrc p10, 7, %0, " vfpreg(_vfp_) ", cr0, 0 @ fmrx %0, " #_vfp_ \ +- : "=r" (__v) : : "cc"); \ +- __v; \ +- }) +- +-#define fmxr(_vfp_,_var_) \ +- asm("mcr p10, 7, %0, " vfpreg(_vfp_) ", cr0, 0 @ fmxr " #_vfp_ ", %0" \ +- : : "r" (_var_) : "cc") ++#define fmrx(_vfp_) ({ \ ++ u32 __v; \ ++ asm volatile ("mrc p10, 7, %0, " vfpreg(_vfp_) "," \ ++ "cr0, 0 @ fmrx %0, " #_vfp_ \ ++ : "=r" (__v) : : "cc"); \ ++ __v; \ ++}) ++ ++#define fmxr(_vfp_, _var_) ({ \ ++ asm volatile ("mcr p10, 7, %0, " vfpreg(_vfp_) "," \ ++ "cr0, 0 @ fmxr " #_vfp_ ", %0" \ ++ : : "r" (_var_) : "cc"); \ ++}) + + #endif + +diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig +index 9e0c1ac3d13eed..5ea7b331967108 100644 +--- a/arch/arm64/Kconfig ++++ b/arch/arm64/Kconfig +@@ -420,7 +420,7 @@ config AMPERE_ERRATUM_AC03_CPU_38 + default y + help + This option adds an alternative code sequence to work around Ampere +- erratum AC03_CPU_38 on AmpereOne. ++ errata AC03_CPU_38 and AC04_CPU_10 on AmpereOne. 
+ + The affected design reports FEAT_HAFDBS as not implemented in + ID_AA64MMFR1_EL1.HAFDBS, but (V)TCR_ELx.{HA,HD} are not RES0 +diff --git a/arch/arm64/boot/dts/exynos/exynos7885-jackpotlte.dts b/arch/arm64/boot/dts/exynos/exynos7885-jackpotlte.dts +index 47a389d9ff7d71..9d74fa6bfed9fb 100644 +--- a/arch/arm64/boot/dts/exynos/exynos7885-jackpotlte.dts ++++ b/arch/arm64/boot/dts/exynos/exynos7885-jackpotlte.dts +@@ -32,7 +32,7 @@ memory@80000000 { + device_type = "memory"; + reg = <0x0 0x80000000 0x3da00000>, + <0x0 0xc0000000 0x40000000>, +- <0x8 0x80000000 0x40000000>; ++ <0x8 0x80000000 0x80000000>; + }; + + gpio-keys { +diff --git a/arch/arm64/boot/dts/mediatek/mt8186.dtsi b/arch/arm64/boot/dts/mediatek/mt8186.dtsi +index 84ec6c1aa12b9e..2c184f9e0fc390 100644 +--- a/arch/arm64/boot/dts/mediatek/mt8186.dtsi ++++ b/arch/arm64/boot/dts/mediatek/mt8186.dtsi +@@ -731,7 +731,7 @@ opp-850000000 { + opp-900000000-3 { + opp-hz = /bits/ 64 <900000000>; + opp-microvolt = <850000>; +- opp-supported-hw = <0x8>; ++ opp-supported-hw = <0xcf>; + }; + + opp-900000000-4 { +@@ -743,13 +743,13 @@ opp-900000000-4 { + opp-900000000-5 { + opp-hz = /bits/ 64 <900000000>; + opp-microvolt = <825000>; +- opp-supported-hw = <0x30>; ++ opp-supported-hw = <0x20>; + }; + + opp-950000000-3 { + opp-hz = /bits/ 64 <950000000>; + opp-microvolt = <900000>; +- opp-supported-hw = <0x8>; ++ opp-supported-hw = <0xcf>; + }; + + opp-950000000-4 { +@@ -761,13 +761,13 @@ opp-950000000-4 { + opp-950000000-5 { + opp-hz = /bits/ 64 <950000000>; + opp-microvolt = <850000>; +- opp-supported-hw = <0x30>; ++ opp-supported-hw = <0x20>; + }; + + opp-1000000000-3 { + opp-hz = /bits/ 64 <1000000000>; + opp-microvolt = <950000>; +- opp-supported-hw = <0x8>; ++ opp-supported-hw = <0xcf>; + }; + + opp-1000000000-4 { +@@ -779,7 +779,7 @@ opp-1000000000-4 { + opp-1000000000-5 { + opp-hz = /bits/ 64 <1000000000>; + opp-microvolt = <875000>; +- opp-supported-hw = <0x30>; ++ opp-supported-hw = <0x20>; + }; + }; + +diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi +index b78f408110bf74..34e18eb5d7f450 100644 +--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi ++++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi +@@ -1312,6 +1312,7 @@ &xhci3 { + usb2-lpm-disable; + vusb33-supply = <&mt6359_vusb_ldo_reg>; + vbus-supply = <&usb_vbus>; ++ mediatek,u3p-dis-msk = <1>; + }; + + #include +diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi +index 20e6d90cc4118e..d21ba00a5bd5df 100644 +--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi ++++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi +@@ -2766,10 +2766,10 @@ dp_intf0: dp-intf@1c015000 { + compatible = "mediatek,mt8195-dp-intf"; + reg = <0 0x1c015000 0 0x1000>; + interrupts = ; +- clocks = <&vdosys0 CLK_VDO0_DP_INTF0>, +- <&vdosys0 CLK_VDO0_DP_INTF0_DP_INTF>, ++ clocks = <&vdosys0 CLK_VDO0_DP_INTF0_DP_INTF>, ++ <&vdosys0 CLK_VDO0_DP_INTF0>, + <&apmixedsys CLK_APMIXED_TVDPLL1>; +- clock-names = "engine", "pixel", "pll"; ++ clock-names = "pixel", "engine", "pll"; + status = "disabled"; + }; + +@@ -3036,10 +3036,10 @@ dp_intf1: dp-intf@1c113000 { + reg = <0 0x1c113000 0 0x1000>; + interrupts = ; + power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>; +- clocks = <&vdosys1 CLK_VDO1_DP_INTF0_MM>, +- <&vdosys1 CLK_VDO1_DPINTF>, ++ clocks = <&vdosys1 CLK_VDO1_DPINTF>, ++ <&vdosys1 CLK_VDO1_DP_INTF0_MM>, + <&apmixedsys CLK_APMIXED_TVDPLL2>; +- clock-names = "engine", "pixel", "pll"; ++ clock-names = "pixel", 
"engine", "pll"; + status = "disabled"; + }; + +diff --git a/arch/arm64/boot/dts/qcom/sa8775p.dtsi b/arch/arm64/boot/dts/qcom/sa8775p.dtsi +index 44bea063aedba3..f6766fa8df34d3 100644 +--- a/arch/arm64/boot/dts/qcom/sa8775p.dtsi ++++ b/arch/arm64/boot/dts/qcom/sa8775p.dtsi +@@ -1951,6 +1951,7 @@ apps_smmu: iommu@15000000 { + reg = <0x0 0x15000000 0x0 0x100000>; + #iommu-cells = <2>; + #global-interrupts = <2>; ++ dma-coherent; + + interrupts = , + , +@@ -2089,6 +2090,7 @@ pcie_smmu: iommu@15200000 { + reg = <0x0 0x15200000 0x0 0x80000>; + #iommu-cells = <2>; + #global-interrupts = <2>; ++ dma-coherent; + + interrupts = , + , +diff --git a/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi +index 4b72de43b71ccc..71d51febabc1e8 100644 +--- a/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi ++++ b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi +@@ -145,8 +145,8 @@ gic: interrupt-controller@11900000 { + #interrupt-cells = <3>; + #address-cells = <0>; + interrupt-controller; +- reg = <0x0 0x11900000 0 0x40000>, +- <0x0 0x11940000 0 0x60000>; ++ reg = <0x0 0x11900000 0 0x20000>, ++ <0x0 0x11940000 0 0x40000>; + interrupts = ; + }; + }; +diff --git a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi +index a877738c304849..edc942c8463959 100644 +--- a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi ++++ b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi +@@ -997,8 +997,8 @@ gic: interrupt-controller@11900000 { + #interrupt-cells = <3>; + #address-cells = <0>; + interrupt-controller; +- reg = <0x0 0x11900000 0 0x40000>, +- <0x0 0x11940000 0 0x60000>; ++ reg = <0x0 0x11900000 0 0x20000>, ++ <0x0 0x11940000 0 0x40000>; + interrupts = ; + }; + +diff --git a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi +index 3f01b096cfb717..d61f7894e55cdd 100644 +--- a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi ++++ b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi +@@ -1004,8 +1004,8 @@ gic: interrupt-controller@11900000 { + #interrupt-cells = <3>; + #address-cells = <0>; + interrupt-controller; +- reg = <0x0 0x11900000 0 0x40000>, +- <0x0 0x11940000 0 0x60000>; ++ reg = <0x0 0x11900000 0 0x20000>, ++ <0x0 0x11940000 0 0x40000>; + interrupts = ; + }; + +diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts +index 294eb2de263deb..f5e124b235c83c 100644 +--- a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts ++++ b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts +@@ -32,12 +32,12 @@ chosen { + backlight: edp-backlight { + compatible = "pwm-backlight"; + power-supply = <&vcc_12v>; +- pwms = <&pwm0 0 740740 0>; ++ pwms = <&pwm0 0 125000 0>; + }; + + bat: battery { + compatible = "simple-battery"; +- charge-full-design-microamp-hours = <9800000>; ++ charge-full-design-microamp-hours = <10000000>; + voltage-max-design-microvolt = <4350000>; + voltage-min-design-microvolt = <3000000>; + }; +diff --git a/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts b/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts +index a337f547caf538..6a02db4f073f29 100644 +--- a/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts ++++ b/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts +@@ -13,7 +13,7 @@ + + / { + model = "Hardkernel ODROID-M1"; +- compatible = "rockchip,rk3568-odroid-m1", "rockchip,rk3568"; ++ compatible = "hardkernel,odroid-m1", "rockchip,rk3568"; + + aliases { + ethernet0 = &gmac0; +diff --git a/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts 
b/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts +index 2f954729f35338..7897323376a5b9 100644 +--- a/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts ++++ b/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts +@@ -123,7 +123,7 @@ main_r5fss1_core1_memory_region: r5f-memory@a5100000 { + no-map; + }; + +- c66_1_dma_memory_region: c66-dma-memory@a6000000 { ++ c66_0_dma_memory_region: c66-dma-memory@a6000000 { + compatible = "shared-dma-pool"; + reg = <0x00 0xa6000000 0x00 0x100000>; + no-map; +@@ -135,7 +135,7 @@ c66_0_memory_region: c66-memory@a6100000 { + no-map; + }; + +- c66_0_dma_memory_region: c66-dma-memory@a7000000 { ++ c66_1_dma_memory_region: c66-dma-memory@a7000000 { + compatible = "shared-dma-pool"; + reg = <0x00 0xa7000000 0x00 0x100000>; + no-map; +diff --git a/arch/arm64/boot/dts/ti/k3-j721e-sk.dts b/arch/arm64/boot/dts/ti/k3-j721e-sk.dts +index 42fe8eee9ec8c7..ccacb65683b5b0 100644 +--- a/arch/arm64/boot/dts/ti/k3-j721e-sk.dts ++++ b/arch/arm64/boot/dts/ti/k3-j721e-sk.dts +@@ -119,7 +119,7 @@ main_r5fss1_core1_memory_region: r5f-memory@a5100000 { + no-map; + }; + +- c66_1_dma_memory_region: c66-dma-memory@a6000000 { ++ c66_0_dma_memory_region: c66-dma-memory@a6000000 { + compatible = "shared-dma-pool"; + reg = <0x00 0xa6000000 0x00 0x100000>; + no-map; +@@ -131,7 +131,7 @@ c66_0_memory_region: c66-memory@a6100000 { + no-map; + }; + +- c66_0_dma_memory_region: c66-dma-memory@a7000000 { ++ c66_1_dma_memory_region: c66-dma-memory@a7000000 { + compatible = "shared-dma-pool"; + reg = <0x00 0xa7000000 0x00 0x100000>; + no-map; +diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h +index 5fd7caea441936..5a7dfeb8e8eb55 100644 +--- a/arch/arm64/include/asm/cputype.h ++++ b/arch/arm64/include/asm/cputype.h +@@ -143,6 +143,7 @@ + #define APPLE_CPU_PART_M2_AVALANCHE_MAX 0x039 + + #define AMPERE_CPU_PART_AMPERE1 0xAC3 ++#define AMPERE_CPU_PART_AMPERE1A 0xAC4 + + #define MICROSOFT_CPU_PART_AZURE_COBALT_100 0xD49 /* Based on r0p0 of ARM Neoverse N2 */ + +@@ -212,6 +213,7 @@ + #define MIDR_APPLE_M2_BLIZZARD_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD_MAX) + #define MIDR_APPLE_M2_AVALANCHE_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE_MAX) + #define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1) ++#define MIDR_AMPERE1A MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1A) + #define MIDR_MICROSOFT_AZURE_COBALT_100 MIDR_CPU_MODEL(ARM_CPU_IMP_MICROSOFT, MICROSOFT_CPU_PART_AZURE_COBALT_100) + + /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */ +diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h +index ae35939f395bb1..1cdae1b4f03bee 100644 +--- a/arch/arm64/include/asm/esr.h ++++ b/arch/arm64/include/asm/esr.h +@@ -10,63 +10,63 @@ + #include + #include + +-#define ESR_ELx_EC_UNKNOWN (0x00) +-#define ESR_ELx_EC_WFx (0x01) ++#define ESR_ELx_EC_UNKNOWN UL(0x00) ++#define ESR_ELx_EC_WFx UL(0x01) + /* Unallocated EC: 0x02 */ +-#define ESR_ELx_EC_CP15_32 (0x03) +-#define ESR_ELx_EC_CP15_64 (0x04) +-#define ESR_ELx_EC_CP14_MR (0x05) +-#define ESR_ELx_EC_CP14_LS (0x06) +-#define ESR_ELx_EC_FP_ASIMD (0x07) +-#define ESR_ELx_EC_CP10_ID (0x08) /* EL2 only */ +-#define ESR_ELx_EC_PAC (0x09) /* EL2 and above */ ++#define ESR_ELx_EC_CP15_32 UL(0x03) ++#define ESR_ELx_EC_CP15_64 UL(0x04) ++#define ESR_ELx_EC_CP14_MR UL(0x05) ++#define ESR_ELx_EC_CP14_LS UL(0x06) ++#define ESR_ELx_EC_FP_ASIMD UL(0x07) ++#define ESR_ELx_EC_CP10_ID UL(0x08) /* EL2 only */ ++#define 
ESR_ELx_EC_PAC UL(0x09) /* EL2 and above */ + /* Unallocated EC: 0x0A - 0x0B */ +-#define ESR_ELx_EC_CP14_64 (0x0C) +-#define ESR_ELx_EC_BTI (0x0D) +-#define ESR_ELx_EC_ILL (0x0E) ++#define ESR_ELx_EC_CP14_64 UL(0x0C) ++#define ESR_ELx_EC_BTI UL(0x0D) ++#define ESR_ELx_EC_ILL UL(0x0E) + /* Unallocated EC: 0x0F - 0x10 */ +-#define ESR_ELx_EC_SVC32 (0x11) +-#define ESR_ELx_EC_HVC32 (0x12) /* EL2 only */ +-#define ESR_ELx_EC_SMC32 (0x13) /* EL2 and above */ ++#define ESR_ELx_EC_SVC32 UL(0x11) ++#define ESR_ELx_EC_HVC32 UL(0x12) /* EL2 only */ ++#define ESR_ELx_EC_SMC32 UL(0x13) /* EL2 and above */ + /* Unallocated EC: 0x14 */ +-#define ESR_ELx_EC_SVC64 (0x15) +-#define ESR_ELx_EC_HVC64 (0x16) /* EL2 and above */ +-#define ESR_ELx_EC_SMC64 (0x17) /* EL2 and above */ +-#define ESR_ELx_EC_SYS64 (0x18) +-#define ESR_ELx_EC_SVE (0x19) +-#define ESR_ELx_EC_ERET (0x1a) /* EL2 only */ ++#define ESR_ELx_EC_SVC64 UL(0x15) ++#define ESR_ELx_EC_HVC64 UL(0x16) /* EL2 and above */ ++#define ESR_ELx_EC_SMC64 UL(0x17) /* EL2 and above */ ++#define ESR_ELx_EC_SYS64 UL(0x18) ++#define ESR_ELx_EC_SVE UL(0x19) ++#define ESR_ELx_EC_ERET UL(0x1a) /* EL2 only */ + /* Unallocated EC: 0x1B */ +-#define ESR_ELx_EC_FPAC (0x1C) /* EL1 and above */ +-#define ESR_ELx_EC_SME (0x1D) ++#define ESR_ELx_EC_FPAC UL(0x1C) /* EL1 and above */ ++#define ESR_ELx_EC_SME UL(0x1D) + /* Unallocated EC: 0x1E */ +-#define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */ +-#define ESR_ELx_EC_IABT_LOW (0x20) +-#define ESR_ELx_EC_IABT_CUR (0x21) +-#define ESR_ELx_EC_PC_ALIGN (0x22) ++#define ESR_ELx_EC_IMP_DEF UL(0x1f) /* EL3 only */ ++#define ESR_ELx_EC_IABT_LOW UL(0x20) ++#define ESR_ELx_EC_IABT_CUR UL(0x21) ++#define ESR_ELx_EC_PC_ALIGN UL(0x22) + /* Unallocated EC: 0x23 */ +-#define ESR_ELx_EC_DABT_LOW (0x24) +-#define ESR_ELx_EC_DABT_CUR (0x25) +-#define ESR_ELx_EC_SP_ALIGN (0x26) +-#define ESR_ELx_EC_MOPS (0x27) +-#define ESR_ELx_EC_FP_EXC32 (0x28) ++#define ESR_ELx_EC_DABT_LOW UL(0x24) ++#define ESR_ELx_EC_DABT_CUR UL(0x25) ++#define ESR_ELx_EC_SP_ALIGN UL(0x26) ++#define ESR_ELx_EC_MOPS UL(0x27) ++#define ESR_ELx_EC_FP_EXC32 UL(0x28) + /* Unallocated EC: 0x29 - 0x2B */ +-#define ESR_ELx_EC_FP_EXC64 (0x2C) ++#define ESR_ELx_EC_FP_EXC64 UL(0x2C) + /* Unallocated EC: 0x2D - 0x2E */ +-#define ESR_ELx_EC_SERROR (0x2F) +-#define ESR_ELx_EC_BREAKPT_LOW (0x30) +-#define ESR_ELx_EC_BREAKPT_CUR (0x31) +-#define ESR_ELx_EC_SOFTSTP_LOW (0x32) +-#define ESR_ELx_EC_SOFTSTP_CUR (0x33) +-#define ESR_ELx_EC_WATCHPT_LOW (0x34) +-#define ESR_ELx_EC_WATCHPT_CUR (0x35) ++#define ESR_ELx_EC_SERROR UL(0x2F) ++#define ESR_ELx_EC_BREAKPT_LOW UL(0x30) ++#define ESR_ELx_EC_BREAKPT_CUR UL(0x31) ++#define ESR_ELx_EC_SOFTSTP_LOW UL(0x32) ++#define ESR_ELx_EC_SOFTSTP_CUR UL(0x33) ++#define ESR_ELx_EC_WATCHPT_LOW UL(0x34) ++#define ESR_ELx_EC_WATCHPT_CUR UL(0x35) + /* Unallocated EC: 0x36 - 0x37 */ +-#define ESR_ELx_EC_BKPT32 (0x38) ++#define ESR_ELx_EC_BKPT32 UL(0x38) + /* Unallocated EC: 0x39 */ +-#define ESR_ELx_EC_VECTOR32 (0x3A) /* EL2 only */ ++#define ESR_ELx_EC_VECTOR32 UL(0x3A) /* EL2 only */ + /* Unallocated EC: 0x3B */ +-#define ESR_ELx_EC_BRK64 (0x3C) ++#define ESR_ELx_EC_BRK64 UL(0x3C) + /* Unallocated EC: 0x3D - 0x3F */ +-#define ESR_ELx_EC_MAX (0x3F) ++#define ESR_ELx_EC_MAX UL(0x3F) + + #define ESR_ELx_EC_SHIFT (26) + #define ESR_ELx_EC_WIDTH (6) +diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h +index f23c1dc3f002fe..8f003db7a6967a 100644 +--- a/arch/arm64/include/uapi/asm/sigcontext.h ++++ 
b/arch/arm64/include/uapi/asm/sigcontext.h +@@ -312,10 +312,10 @@ struct zt_context { + ((sizeof(struct za_context) + (__SVE_VQ_BYTES - 1)) \ + / __SVE_VQ_BYTES * __SVE_VQ_BYTES) + +-#define ZA_SIG_REGS_SIZE(vq) ((vq * __SVE_VQ_BYTES) * (vq * __SVE_VQ_BYTES)) ++#define ZA_SIG_REGS_SIZE(vq) (((vq) * __SVE_VQ_BYTES) * ((vq) * __SVE_VQ_BYTES)) + + #define ZA_SIG_ZAV_OFFSET(vq, n) (ZA_SIG_REGS_OFFSET + \ +- (SVE_SIG_ZREG_SIZE(vq) * n)) ++ (SVE_SIG_ZREG_SIZE(vq) * (n))) + + #define ZA_SIG_CONTEXT_SIZE(vq) \ + (ZA_SIG_REGS_OFFSET + ZA_SIG_REGS_SIZE(vq)) +@@ -326,7 +326,7 @@ struct zt_context { + + #define ZT_SIG_REGS_OFFSET sizeof(struct zt_context) + +-#define ZT_SIG_REGS_SIZE(n) (ZT_SIG_REG_BYTES * n) ++#define ZT_SIG_REGS_SIZE(n) (ZT_SIG_REG_BYTES * (n)) + + #define ZT_SIG_CONTEXT_SIZE(n) \ + (sizeof(struct zt_context) + ZT_SIG_REGS_SIZE(n)) +diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c +index 57b1d6a68256b7..f8d94902fbb59c 100644 +--- a/arch/arm64/kernel/cpu_errata.c ++++ b/arch/arm64/kernel/cpu_errata.c +@@ -472,6 +472,14 @@ static const struct midr_range erratum_spec_ssbs_list[] = { + }; + #endif + ++#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38 ++static const struct midr_range erratum_ac03_cpu_38_list[] = { ++ MIDR_ALL_VERSIONS(MIDR_AMPERE1), ++ MIDR_ALL_VERSIONS(MIDR_AMPERE1A), ++ {}, ++}; ++#endif ++ + const struct arm64_cpu_capabilities arm64_errata[] = { + #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE + { +@@ -789,7 +797,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = { + { + .desc = "AmpereOne erratum AC03_CPU_38", + .capability = ARM64_WORKAROUND_AMPERE_AC03_CPU_38, +- ERRATA_MIDR_ALL_VERSIONS(MIDR_AMPERE1), ++ ERRATA_MIDR_RANGE_LIST(erratum_ac03_cpu_38_list), + }, + #endif + { +diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c +index 6e4dba9eadef52..8d21ab904f1a98 100644 +--- a/arch/arm64/kvm/hyp/nvhe/ffa.c ++++ b/arch/arm64/kvm/hyp/nvhe/ffa.c +@@ -415,9 +415,9 @@ static void do_ffa_mem_frag_tx(struct arm_smccc_res *res, + return; + } + +-static __always_inline void do_ffa_mem_xfer(const u64 func_id, +- struct arm_smccc_res *res, +- struct kvm_cpu_context *ctxt) ++static void __do_ffa_mem_xfer(const u64 func_id, ++ struct arm_smccc_res *res, ++ struct kvm_cpu_context *ctxt) + { + DECLARE_REG(u32, len, ctxt, 1); + DECLARE_REG(u32, fraglen, ctxt, 2); +@@ -428,9 +428,6 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id, + u32 offset, nr_ranges; + int ret = 0; + +- BUILD_BUG_ON(func_id != FFA_FN64_MEM_SHARE && +- func_id != FFA_FN64_MEM_LEND); +- + if (addr_mbz || npages_mbz || fraglen > len || + fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) { + ret = FFA_RET_INVALID_PARAMETERS; +@@ -449,6 +446,11 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id, + goto out_unlock; + } + ++ if (len > ffa_desc_buf.len) { ++ ret = FFA_RET_NO_MEMORY; ++ goto out_unlock; ++ } ++ + buf = hyp_buffers.tx; + memcpy(buf, host_buffers.tx, fraglen); + +@@ -498,6 +500,13 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id, + goto out_unlock; + } + ++#define do_ffa_mem_xfer(fid, res, ctxt) \ ++ do { \ ++ BUILD_BUG_ON((fid) != FFA_FN64_MEM_SHARE && \ ++ (fid) != FFA_FN64_MEM_LEND); \ ++ __do_ffa_mem_xfer((fid), (res), (ctxt)); \ ++ } while (0); ++ + static void do_ffa_mem_reclaim(struct arm_smccc_res *res, + struct kvm_cpu_context *ctxt) + { +diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c +index e06ce147c0b7fc..fb87219fc3b469 100644 +--- a/arch/m68k/kernel/process.c ++++ 
b/arch/m68k/kernel/process.c +@@ -116,7 +116,7 @@ asmlinkage int m68k_clone(struct pt_regs *regs) + { + /* regs will be equal to current_pt_regs() */ + struct kernel_clone_args args = { +- .flags = regs->d1 & ~CSIGNAL, ++ .flags = (u32)(regs->d1) & ~CSIGNAL, + .pidfd = (int __user *)regs->d3, + .child_tid = (int __user *)regs->d4, + .parent_tid = (int __user *)regs->d3, +diff --git a/arch/powerpc/crypto/Kconfig b/arch/powerpc/crypto/Kconfig +index 6fc2248ca56166..fccf742c55c2c3 100644 +--- a/arch/powerpc/crypto/Kconfig ++++ b/arch/powerpc/crypto/Kconfig +@@ -96,6 +96,7 @@ config CRYPTO_AES_PPC_SPE + + config CRYPTO_AES_GCM_P10 + tristate "Stitched AES/GCM acceleration support on P10 or later CPU (PPC)" ++ depends on BROKEN + depends on PPC64 && CPU_LITTLE_ENDIAN && VSX + select CRYPTO_LIB_AES + select CRYPTO_ALGAPI +diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h +index 2bc53c646ccd7d..83848b534cb171 100644 +--- a/arch/powerpc/include/asm/asm-compat.h ++++ b/arch/powerpc/include/asm/asm-compat.h +@@ -39,6 +39,12 @@ + #define STDX_BE stringify_in_c(stdbrx) + #endif + ++#ifdef CONFIG_CC_IS_CLANG ++#define DS_FORM_CONSTRAINT "Z<>" ++#else ++#define DS_FORM_CONSTRAINT "YZ<>" ++#endif ++ + #else /* 32-bit */ + + /* operations for longs and pointers */ +diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h +index 5bf6a4d49268c7..d1ea554c33ed7e 100644 +--- a/arch/powerpc/include/asm/atomic.h ++++ b/arch/powerpc/include/asm/atomic.h +@@ -11,6 +11,7 @@ + #include + #include + #include ++#include + + /* + * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with +@@ -197,7 +198,7 @@ static __inline__ s64 arch_atomic64_read(const atomic64_t *v) + if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED)) + __asm__ __volatile__("ld %0,0(%1)" : "=r"(t) : "b"(&v->counter)); + else +- __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter)); ++ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : DS_FORM_CONSTRAINT (v->counter)); + + return t; + } +@@ -208,7 +209,7 @@ static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i) + if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED)) + __asm__ __volatile__("std %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter)); + else +- __asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i)); ++ __asm__ __volatile__("std%U0%X0 %1,%0" : "=" DS_FORM_CONSTRAINT (v->counter) : "r"(i)); + } + + #define ATOMIC64_OP(op, asm_op) \ +diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h +index ccc91bf9b03428..a81bd825087cda 100644 +--- a/arch/powerpc/include/asm/uaccess.h ++++ b/arch/powerpc/include/asm/uaccess.h +@@ -6,6 +6,7 @@ + #include + #include + #include ++#include + + #ifdef __powerpc64__ + /* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */ +@@ -92,12 +93,6 @@ __pu_failed: \ + : label) + #endif + +-#ifdef CONFIG_CC_IS_CLANG +-#define DS_FORM_CONSTRAINT "Z<>" +-#else +-#define DS_FORM_CONSTRAINT "YZ<>" +-#endif +- + #ifdef __powerpc64__ + #ifdef CONFIG_PPC_KERNEL_PREFIXED + #define __put_user_asm2_goto(x, ptr, label) \ +diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S +index 647b0b445e89db..0c94db8e2bdedb 100644 +--- a/arch/powerpc/kernel/head_8xx.S ++++ b/arch/powerpc/kernel/head_8xx.S +@@ -41,12 +41,12 @@ + #include "head_32.h" + + .macro compare_to_kernel_boundary scratch, addr +-#if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000 ++#if CONFIG_TASK_SIZE <= 0x80000000 && MODULES_VADDR >= 0x80000000 
+ /* By simply checking Address >= 0x80000000, we know if its a kernel address */ + not. \scratch, \addr + #else + rlwinm \scratch, \addr, 16, 0xfff8 +- cmpli cr0, \scratch, PAGE_OFFSET@h ++ cmpli cr0, \scratch, TASK_SIZE@h + #endif + .endm + +@@ -404,7 +404,7 @@ FixupDAR:/* Entry point for dcbx workaround. */ + mfspr r10, SPRN_SRR0 + mtspr SPRN_MD_EPN, r10 + rlwinm r11, r10, 16, 0xfff8 +- cmpli cr1, r11, PAGE_OFFSET@h ++ cmpli cr1, r11, TASK_SIZE@h + mfspr r11, SPRN_M_TWB /* Get level 1 table */ + blt+ cr1, 3f + +diff --git a/arch/powerpc/kernel/vdso/gettimeofday.S b/arch/powerpc/kernel/vdso/gettimeofday.S +index 48fc6658053aa4..894cb939cd2b31 100644 +--- a/arch/powerpc/kernel/vdso/gettimeofday.S ++++ b/arch/powerpc/kernel/vdso/gettimeofday.S +@@ -38,11 +38,7 @@ + .else + addi r4, r5, VDSO_DATA_OFFSET + .endif +-#ifdef __powerpc64__ + bl CFUNC(DOTSYM(\funct)) +-#else +- bl \funct +-#endif + PPC_LL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1) + #ifdef __powerpc64__ + PPC_LL r2, PPC_MIN_STKFRM + STK_GOT(r1) +diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c +index 32450163027874..a947dff35d6517 100644 +--- a/arch/powerpc/mm/nohash/8xx.c ++++ b/arch/powerpc/mm/nohash/8xx.c +@@ -149,11 +149,11 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) + + mmu_mapin_immr(); + +- mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true); ++ mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_X, true); + if (debug_pagealloc_enabled_or_kfence()) { + top = boundary; + } else { +- mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true); ++ mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_X, true); + mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true); + } + +diff --git a/arch/riscv/include/asm/kvm_vcpu_pmu.h b/arch/riscv/include/asm/kvm_vcpu_pmu.h +index 395518a1664e00..a50a1d23523fea 100644 +--- a/arch/riscv/include/asm/kvm_vcpu_pmu.h ++++ b/arch/riscv/include/asm/kvm_vcpu_pmu.h +@@ -10,6 +10,7 @@ + #define __KVM_VCPU_RISCV_PMU_H + + #include ++#include + #include + + #ifdef CONFIG_RISCV_PMU_SBI +@@ -57,11 +58,11 @@ struct kvm_pmu { + + #if defined(CONFIG_32BIT) + #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \ +-{.base = CSR_CYCLEH, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, \ +-{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, ++{.base = CSR_CYCLEH, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm }, \ ++{.base = CSR_CYCLE, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm }, + #else + #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \ +-{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, ++{.base = CSR_CYCLE, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm }, + #endif + + int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid); +@@ -92,8 +93,20 @@ void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu); + struct kvm_pmu { + }; + ++static inline int kvm_riscv_vcpu_pmu_read_legacy(struct kvm_vcpu *vcpu, unsigned int csr_num, ++ unsigned long *val, unsigned long new_val, ++ unsigned long wr_mask) ++{ ++ if (csr_num == CSR_CYCLE || csr_num == CSR_INSTRET) { ++ *val = 0; ++ return KVM_INSN_CONTINUE_NEXT_SEPC; ++ } else { ++ return KVM_INSN_ILLEGAL_TRAP; ++ } ++} ++ + #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \ +-{.base = 0, .count = 0, .func = NULL }, ++{.base = CSR_CYCLE, .count = 3, .func = kvm_riscv_vcpu_pmu_read_legacy }, + + static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {} + static inline int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid) +diff 
--git a/arch/riscv/kernel/perf_callchain.c b/arch/riscv/kernel/perf_callchain.c +index 3348a61de7d998..2932791e938821 100644 +--- a/arch/riscv/kernel/perf_callchain.c ++++ b/arch/riscv/kernel/perf_callchain.c +@@ -62,7 +62,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry, + perf_callchain_store(entry, regs->epc); + + fp = user_backtrace(entry, fp, regs->ra); +- while (fp && !(fp & 0x3) && entry->nr < entry->max_stack) ++ while (fp && !(fp & 0x7) && entry->nr < entry->max_stack) + fp = user_backtrace(entry, fp, 0); + } + +diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c +index 9cd97091c72330..7a7fe40d0930be 100644 +--- a/arch/riscv/kvm/vcpu_sbi.c ++++ b/arch/riscv/kvm/vcpu_sbi.c +@@ -91,8 +91,8 @@ void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run) + run->riscv_sbi.args[3] = cp->a3; + run->riscv_sbi.args[4] = cp->a4; + run->riscv_sbi.args[5] = cp->a5; +- run->riscv_sbi.ret[0] = cp->a0; +- run->riscv_sbi.ret[1] = cp->a1; ++ run->riscv_sbi.ret[0] = SBI_ERR_NOT_SUPPORTED; ++ run->riscv_sbi.ret[1] = 0; + } + + void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu, +diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c +index 006041fbb65f83..905ac8a3f7165c 100644 +--- a/arch/x86/coco/tdx/tdx.c ++++ b/arch/x86/coco/tdx/tdx.c +@@ -14,6 +14,7 @@ + #include + #include + #include ++#include + + /* MMIO direction */ + #define EPT_READ 0 +@@ -405,6 +406,11 @@ static int handle_mmio(struct pt_regs *regs, struct ve_info *ve) + return -EINVAL; + } + ++ if (!fault_in_kernel_space(ve->gla)) { ++ WARN_ONCE(1, "Access to userspace address is not supported"); ++ return -EINVAL; ++ } ++ + /* + * Reject EPT violation #VEs that split pages. + * +diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c +index cc5c6a3264967f..4110246aba12c3 100644 +--- a/arch/x86/events/intel/pt.c ++++ b/arch/x86/events/intel/pt.c +@@ -1602,6 +1602,7 @@ static void pt_event_stop(struct perf_event *event, int mode) + * see comment in intel_pt_interrupt(). + */ + WRITE_ONCE(pt->handle_nmi, 0); ++ barrier(); + + pt_config_stop(event); + +@@ -1653,11 +1654,10 @@ static long pt_event_snapshot_aux(struct perf_event *event, + return 0; + + /* +- * Here, handle_nmi tells us if the tracing is on ++ * There is no PT interrupt in this mode, so stop the trace and it will ++ * remain stopped while the buffer is copied. + */ +- if (READ_ONCE(pt->handle_nmi)) +- pt_config_stop(event); +- ++ pt_config_stop(event); + pt_read_offset(buf); + pt_update_head(pt); + +@@ -1669,11 +1669,10 @@ static long pt_event_snapshot_aux(struct perf_event *event, + ret = perf_output_copy_aux(&pt->handle, handle, from, to); + + /* +- * If the tracing was on when we turned up, restart it. +- * Compiler barrier not needed as we couldn't have been +- * preempted by anything that touches pt->handle_nmi. ++ * Here, handle_nmi tells us if the tracing was on. ++ * If the tracing was on, restart it. + */ +- if (pt->handle_nmi) ++ if (READ_ONCE(pt->handle_nmi)) + pt_config_start(event); + + return ret; +diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h +index f896eed4516c7e..529c36a98d9ea0 100644 +--- a/arch/x86/include/asm/acpi.h ++++ b/arch/x86/include/asm/acpi.h +@@ -165,6 +165,14 @@ void acpi_generic_reduced_hw_init(void); + void x86_default_set_root_pointer(u64 addr); + u64 x86_default_get_root_pointer(void); + ++#ifdef CONFIG_XEN_PV ++/* A Xen PV domain needs a special acpi_os_ioremap() handling. 
*/ ++extern void __iomem * (*acpi_os_ioremap)(acpi_physical_address phys, ++ acpi_size size); ++void __iomem *x86_acpi_os_ioremap(acpi_physical_address phys, acpi_size size); ++#define acpi_os_ioremap acpi_os_ioremap ++#endif ++ + #else /* !CONFIG_ACPI */ + + #define acpi_lapic 0 +diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h +index 66837b8c67f1a9..f2e245741afc2c 100644 +--- a/arch/x86/include/asm/hardirq.h ++++ b/arch/x86/include/asm/hardirq.h +@@ -63,7 +63,11 @@ extern u64 arch_irq_stat(void); + #define local_softirq_pending_ref pcpu_hot.softirq_pending + + #if IS_ENABLED(CONFIG_KVM_INTEL) +-static inline void kvm_set_cpu_l1tf_flush_l1d(void) ++/* ++ * This function is called from noinstr interrupt contexts ++ * and must be inlined to not get instrumentation. ++ */ ++static __always_inline void kvm_set_cpu_l1tf_flush_l1d(void) + { + __this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1); + } +@@ -78,7 +82,7 @@ static __always_inline bool kvm_get_cpu_l1tf_flush_l1d(void) + return __this_cpu_read(irq_stat.kvm_cpu_l1tf_flush_l1d); + } + #else /* !IS_ENABLED(CONFIG_KVM_INTEL) */ +-static inline void kvm_set_cpu_l1tf_flush_l1d(void) { } ++static __always_inline void kvm_set_cpu_l1tf_flush_l1d(void) { } + #endif /* IS_ENABLED(CONFIG_KVM_INTEL) */ + + #endif /* _ASM_X86_HARDIRQ_H */ +diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h +index 13639e57e1f8af..10603e185111d5 100644 +--- a/arch/x86/include/asm/idtentry.h ++++ b/arch/x86/include/asm/idtentry.h +@@ -13,15 +13,18 @@ + + #include + ++typedef void (*idtentry_t)(struct pt_regs *regs); ++ + /** + * DECLARE_IDTENTRY - Declare functions for simple IDT entry points + * No error code pushed by hardware + * @vector: Vector number (ignored for C) + * @func: Function name of the entry point + * +- * Declares three functions: ++ * Declares four functions: + * - The ASM entry point: asm_##func + * - The XEN PV trap entry point: xen_##func (maybe unused) ++ * - The C handler called from the FRED event dispatcher (maybe unused) + * - The C handler called from the ASM entry point + * + * Note: This is the C variant of DECLARE_IDTENTRY(). As the name says it +@@ -31,6 +34,7 @@ + #define DECLARE_IDTENTRY(vector, func) \ + asmlinkage void asm_##func(void); \ + asmlinkage void xen_asm_##func(void); \ ++ void fred_##func(struct pt_regs *regs); \ + __visible void func(struct pt_regs *regs) + + /** +@@ -137,6 +141,17 @@ static __always_inline void __##func(struct pt_regs *regs, \ + #define DEFINE_IDTENTRY_RAW(func) \ + __visible noinstr void func(struct pt_regs *regs) + ++/** ++ * DEFINE_FREDENTRY_RAW - Emit code for raw FRED entry points ++ * @func: Function name of the entry point ++ * ++ * @func is called from the FRED event dispatcher with interrupts disabled. ++ * ++ * See @DEFINE_IDTENTRY_RAW for further details. 
++ */ ++#define DEFINE_FREDENTRY_RAW(func) \ ++noinstr void fred_##func(struct pt_regs *regs) ++ + /** + * DECLARE_IDTENTRY_RAW_ERRORCODE - Declare functions for raw IDT entry points + * Error code pushed by hardware +@@ -197,8 +212,8 @@ __visible noinstr void func(struct pt_regs *regs, \ + irqentry_state_t state = irqentry_enter(regs); \ + u32 vector = (u32)(u8)error_code; \ + \ ++ kvm_set_cpu_l1tf_flush_l1d(); \ + instrumentation_begin(); \ +- kvm_set_cpu_l1tf_flush_l1d(); \ + run_irq_on_irqstack_cond(__##func, regs, vector); \ + instrumentation_end(); \ + irqentry_exit(regs, state); \ +@@ -233,17 +248,27 @@ static noinline void __##func(struct pt_regs *regs, u32 vector) + #define DEFINE_IDTENTRY_SYSVEC(func) \ + static void __##func(struct pt_regs *regs); \ + \ ++static __always_inline void instr_##func(struct pt_regs *regs) \ ++{ \ ++ run_sysvec_on_irqstack_cond(__##func, regs); \ ++} \ ++ \ + __visible noinstr void func(struct pt_regs *regs) \ + { \ + irqentry_state_t state = irqentry_enter(regs); \ + \ ++ kvm_set_cpu_l1tf_flush_l1d(); \ + instrumentation_begin(); \ +- kvm_set_cpu_l1tf_flush_l1d(); \ +- run_sysvec_on_irqstack_cond(__##func, regs); \ ++ instr_##func (regs); \ + instrumentation_end(); \ + irqentry_exit(regs, state); \ + } \ + \ ++void fred_##func(struct pt_regs *regs) \ ++{ \ ++ instr_##func (regs); \ ++} \ ++ \ + static noinline void __##func(struct pt_regs *regs) + + /** +@@ -260,19 +285,29 @@ static noinline void __##func(struct pt_regs *regs) + #define DEFINE_IDTENTRY_SYSVEC_SIMPLE(func) \ + static __always_inline void __##func(struct pt_regs *regs); \ + \ ++static __always_inline void instr_##func(struct pt_regs *regs) \ ++{ \ ++ __irq_enter_raw(); \ ++ __##func (regs); \ ++ __irq_exit_raw(); \ ++} \ ++ \ + __visible noinstr void func(struct pt_regs *regs) \ + { \ + irqentry_state_t state = irqentry_enter(regs); \ + \ ++ kvm_set_cpu_l1tf_flush_l1d(); \ + instrumentation_begin(); \ +- __irq_enter_raw(); \ +- kvm_set_cpu_l1tf_flush_l1d(); \ +- __##func (regs); \ +- __irq_exit_raw(); \ ++ instr_##func (regs); \ + instrumentation_end(); \ + irqentry_exit(regs, state); \ + } \ + \ ++void fred_##func(struct pt_regs *regs) \ ++{ \ ++ instr_##func (regs); \ ++} \ ++ \ + static __always_inline void __##func(struct pt_regs *regs) + + /** +@@ -410,15 +445,18 @@ __visible noinstr void func(struct pt_regs *regs, \ + /* C-Code mapping */ + #define DECLARE_IDTENTRY_NMI DECLARE_IDTENTRY_RAW + #define DEFINE_IDTENTRY_NMI DEFINE_IDTENTRY_RAW ++#define DEFINE_FREDENTRY_NMI DEFINE_FREDENTRY_RAW + + #ifdef CONFIG_X86_64 + #define DECLARE_IDTENTRY_MCE DECLARE_IDTENTRY_IST + #define DEFINE_IDTENTRY_MCE DEFINE_IDTENTRY_IST + #define DEFINE_IDTENTRY_MCE_USER DEFINE_IDTENTRY_NOIST ++#define DEFINE_FREDENTRY_MCE DEFINE_FREDENTRY_RAW + + #define DECLARE_IDTENTRY_DEBUG DECLARE_IDTENTRY_IST + #define DEFINE_IDTENTRY_DEBUG DEFINE_IDTENTRY_IST + #define DEFINE_IDTENTRY_DEBUG_USER DEFINE_IDTENTRY_NOIST ++#define DEFINE_FREDENTRY_DEBUG DEFINE_FREDENTRY_RAW + #endif + + #else /* !__ASSEMBLY__ */ +@@ -655,23 +693,36 @@ DECLARE_IDTENTRY(RESCHEDULE_VECTOR, sysvec_reschedule_ipi); + DECLARE_IDTENTRY_SYSVEC(REBOOT_VECTOR, sysvec_reboot); + DECLARE_IDTENTRY_SYSVEC(CALL_FUNCTION_SINGLE_VECTOR, sysvec_call_function_single); + DECLARE_IDTENTRY_SYSVEC(CALL_FUNCTION_VECTOR, sysvec_call_function); ++#else ++# define fred_sysvec_reschedule_ipi NULL ++# define fred_sysvec_reboot NULL ++# define fred_sysvec_call_function_single NULL ++# define fred_sysvec_call_function NULL + #endif + + #ifdef 
CONFIG_X86_LOCAL_APIC + # ifdef CONFIG_X86_MCE_THRESHOLD + DECLARE_IDTENTRY_SYSVEC(THRESHOLD_APIC_VECTOR, sysvec_threshold); ++# else ++# define fred_sysvec_threshold NULL + # endif + + # ifdef CONFIG_X86_MCE_AMD + DECLARE_IDTENTRY_SYSVEC(DEFERRED_ERROR_VECTOR, sysvec_deferred_error); ++# else ++# define fred_sysvec_deferred_error NULL + # endif + + # ifdef CONFIG_X86_THERMAL_VECTOR + DECLARE_IDTENTRY_SYSVEC(THERMAL_APIC_VECTOR, sysvec_thermal); ++# else ++# define fred_sysvec_thermal NULL + # endif + + # ifdef CONFIG_IRQ_WORK + DECLARE_IDTENTRY_SYSVEC(IRQ_WORK_VECTOR, sysvec_irq_work); ++# else ++# define fred_sysvec_irq_work NULL + # endif + #endif + +@@ -679,12 +730,16 @@ DECLARE_IDTENTRY_SYSVEC(IRQ_WORK_VECTOR, sysvec_irq_work); + DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_VECTOR, sysvec_kvm_posted_intr_ipi); + DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_WAKEUP_VECTOR, sysvec_kvm_posted_intr_wakeup_ipi); + DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_NESTED_VECTOR, sysvec_kvm_posted_intr_nested_ipi); ++#else ++# define fred_sysvec_kvm_posted_intr_ipi NULL ++# define fred_sysvec_kvm_posted_intr_wakeup_ipi NULL ++# define fred_sysvec_kvm_posted_intr_nested_ipi NULL + #endif + + #if IS_ENABLED(CONFIG_HYPERV) + DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR, sysvec_hyperv_callback); + DECLARE_IDTENTRY_SYSVEC(HYPERV_REENLIGHTENMENT_VECTOR, sysvec_hyperv_reenlightenment); +-DECLARE_IDTENTRY_SYSVEC(HYPERV_STIMER0_VECTOR, sysvec_hyperv_stimer0); ++DECLARE_IDTENTRY_SYSVEC(HYPERV_STIMER0_VECTOR, sysvec_hyperv_stimer0); + #endif + + #if IS_ENABLED(CONFIG_ACRN_GUEST) +diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c +index c55c0ef47a187e..49c39f5dc1c992 100644 +--- a/arch/x86/kernel/acpi/boot.c ++++ b/arch/x86/kernel/acpi/boot.c +@@ -1901,3 +1901,14 @@ u64 x86_default_get_root_pointer(void) + { + return boot_params.acpi_rsdp_addr; + } ++ ++#ifdef CONFIG_XEN_PV ++void __iomem *x86_acpi_os_ioremap(acpi_physical_address phys, acpi_size size) ++{ ++ return ioremap_cache(phys, size); ++} ++ ++void __iomem * (*acpi_os_ioremap)(acpi_physical_address phys, acpi_size size) = ++ x86_acpi_os_ioremap; ++EXPORT_SYMBOL_GPL(acpi_os_ioremap); ++#endif +diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c +index 166692f2d50111..c7f8c3200e8d7f 100644 +--- a/arch/x86/kernel/cpu/sgx/main.c ++++ b/arch/x86/kernel/cpu/sgx/main.c +@@ -474,24 +474,25 @@ struct sgx_epc_page *__sgx_alloc_epc_page(void) + { + struct sgx_epc_page *page; + int nid_of_current = numa_node_id(); +- int nid = nid_of_current; ++ int nid_start, nid; + +- if (node_isset(nid_of_current, sgx_numa_mask)) { +- page = __sgx_alloc_epc_page_from_node(nid_of_current); +- if (page) +- return page; +- } +- +- /* Fall back to the non-local NUMA nodes: */ +- while (true) { +- nid = next_node_in(nid, sgx_numa_mask); +- if (nid == nid_of_current) +- break; ++ /* ++ * Try local node first. If it doesn't have an EPC section, ++ * fall back to the non-local NUMA nodes. 
++ */ ++ if (node_isset(nid_of_current, sgx_numa_mask)) ++ nid_start = nid_of_current; ++ else ++ nid_start = next_node_in(nid_of_current, sgx_numa_mask); + ++ nid = nid_start; ++ do { + page = __sgx_alloc_epc_page_from_node(nid); + if (page) + return page; +- } ++ ++ nid = next_node_in(nid, sgx_numa_mask); ++ } while (nid != nid_start); + + return ERR_PTR(-ENOMEM); + } +diff --git a/arch/x86/kernel/jailhouse.c b/arch/x86/kernel/jailhouse.c +index 578d16fc040fa1..5481c7c5db301b 100644 +--- a/arch/x86/kernel/jailhouse.c ++++ b/arch/x86/kernel/jailhouse.c +@@ -12,6 +12,7 @@ + #include + #include + #include ++#include + #include + #include + #include +diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c +index c94dec6a18345a..1f54eedc3015e9 100644 +--- a/arch/x86/kernel/mmconf-fam10h_64.c ++++ b/arch/x86/kernel/mmconf-fam10h_64.c +@@ -9,6 +9,7 @@ + #include + #include + #include ++#include + + #include + #include +diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c +index 4989095ab7696a..d595ef7c1de05e 100644 +--- a/arch/x86/kernel/process_64.c ++++ b/arch/x86/kernel/process_64.c +@@ -750,6 +750,27 @@ static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr) + + #define LAM_U57_BITS 6 + ++static void enable_lam_func(void *__mm) ++{ ++ struct mm_struct *mm = __mm; ++ ++ if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm) { ++ write_cr3(__read_cr3() | mm->context.lam_cr3_mask); ++ set_tlbstate_lam_mode(mm); ++ } ++} ++ ++static void mm_enable_lam(struct mm_struct *mm) ++{ ++ /* ++ * Even though the process must still be single-threaded at this ++ * point, kernel threads may be using the mm. IPI those kernel ++ * threads if they exist. ++ */ ++ on_each_cpu_mask(mm_cpumask(mm), enable_lam_func, mm, true); ++ set_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags); ++} ++ + static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits) + { + if (!cpu_feature_enabled(X86_FEATURE_LAM)) +@@ -766,6 +787,10 @@ static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits) + if (mmap_write_lock_killable(mm)) + return -EINTR; + ++ /* ++ * MM_CONTEXT_LOCK_LAM is set on clone. 
Prevent LAM from ++ * being enabled unless the process is single threaded: ++ */ + if (test_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags)) { + mmap_write_unlock(mm); + return -EBUSY; +@@ -782,9 +807,7 @@ static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits) + return -EINVAL; + } + +- write_cr3(__read_cr3() | mm->context.lam_cr3_mask); +- set_tlbstate_lam_mode(mm); +- set_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags); ++ mm_enable_lam(mm); + + mmap_write_unlock(mm); + +diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c +index 2a187c0cbd5b11..ce77dac9a0202a 100644 +--- a/arch/x86/kernel/smpboot.c ++++ b/arch/x86/kernel/smpboot.c +@@ -60,6 +60,7 @@ + #include + #include + #include ++#include + + #include + #include +diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c +index 3f0718b4a7d28b..268627a17cf0d8 100644 +--- a/arch/x86/kernel/x86_init.c ++++ b/arch/x86/kernel/x86_init.c +@@ -8,6 +8,7 @@ + #include + #include + #include ++#include + + #include + #include +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c +index 66a2c4c0ae106a..1380f34897770d 100644 +--- a/arch/x86/kvm/lapic.c ++++ b/arch/x86/kvm/lapic.c +@@ -2443,6 +2443,29 @@ void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu) + } + EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi); + ++#define X2APIC_ICR_RESERVED_BITS (GENMASK_ULL(31, 20) | GENMASK_ULL(17, 16) | BIT(13)) ++ ++int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data) ++{ ++ if (data & X2APIC_ICR_RESERVED_BITS) ++ return 1; ++ ++ /* ++ * The BUSY bit is reserved on both Intel and AMD in x2APIC mode, but ++ * only AMD requires it to be zero, Intel essentially just ignores the ++ * bit. And if IPI virtualization (Intel) or x2AVIC (AMD) is enabled, ++ * the CPU performs the reserved bits checks, i.e. the underlying CPU ++ * behavior will "win". Arbitrarily clear the BUSY bit, as there is no ++ * sane way to provide consistent behavior with respect to hardware. ++ */ ++ data &= ~APIC_ICR_BUSY; ++ ++ kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32)); ++ kvm_lapic_set_reg64(apic, APIC_ICR, data); ++ trace_kvm_apic_write(APIC_ICR, data); ++ return 0; ++} ++ + /* emulate APIC access in a trap manner */ + void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset) + { +@@ -2460,7 +2483,7 @@ void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset) + * maybe-unecessary write, and both are in the noise anyways. 
+ */ + if (apic_x2apic_mode(apic) && offset == APIC_ICR) +- kvm_x2apic_icr_write(apic, kvm_lapic_get_reg64(apic, APIC_ICR)); ++ WARN_ON_ONCE(kvm_x2apic_icr_write(apic, kvm_lapic_get_reg64(apic, APIC_ICR))); + else + kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset)); + } +@@ -3153,16 +3176,6 @@ int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr) + return 0; + } + +-int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data) +-{ +- data &= ~APIC_ICR_BUSY; +- +- kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32)); +- kvm_lapic_set_reg64(apic, APIC_ICR, data); +- trace_kvm_apic_write(APIC_ICR, data); +- return 0; +-} +- + static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data) + { + u32 low; +diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c +index 453ea95b667dad..2fbae48f0b470a 100644 +--- a/arch/x86/mm/tlb.c ++++ b/arch/x86/mm/tlb.c +@@ -497,9 +497,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, + { + struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm); + u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid); +- unsigned long new_lam = mm_lam_cr3_mask(next); + bool was_lazy = this_cpu_read(cpu_tlbstate_shared.is_lazy); + unsigned cpu = smp_processor_id(); ++ unsigned long new_lam; + u64 next_tlb_gen; + bool need_flush; + u16 new_asid; +@@ -622,9 +622,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, + cpumask_clear_cpu(cpu, mm_cpumask(real_prev)); + } + +- /* +- * Start remote flushes and then read tlb_gen. +- */ ++ /* Start receiving IPIs and then read tlb_gen (and LAM below) */ + if (next != &init_mm) + cpumask_set_cpu(cpu, mm_cpumask(next)); + next_tlb_gen = atomic64_read(&next->context.tlb_gen); +@@ -636,6 +634,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, + barrier(); + } + ++ new_lam = mm_lam_cr3_mask(next); + set_tlbstate_lam_mode(next); + if (need_flush) { + this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); +diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c +index b33afb240601b0..98a9bb92d75c88 100644 +--- a/arch/x86/pci/fixup.c ++++ b/arch/x86/pci/fixup.c +@@ -980,7 +980,7 @@ static void amd_rp_pme_suspend(struct pci_dev *dev) + return; + + rp = pcie_find_root_port(dev); +- if (!rp->pm_cap) ++ if (!rp || !rp->pm_cap) + return; + + rp->pme_support &= ~((PCI_PM_CAP_PME_D3hot|PCI_PM_CAP_PME_D3cold) >> +@@ -994,7 +994,7 @@ static void amd_rp_pme_resume(struct pci_dev *dev) + u16 pmc; + + rp = pcie_find_root_port(dev); +- if (!rp->pm_cap) ++ if (!rp || !rp->pm_cap) + return; + + pci_read_config_word(rp, rp->pm_cap + PCI_PM_PMC, &pmc); +diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c +index 9d4a9311e819bb..6b201e64d8abc8 100644 +--- a/arch/x86/xen/mmu_pv.c ++++ b/arch/x86/xen/mmu_pv.c +@@ -2019,10 +2019,7 @@ void __init xen_reserve_special_pages(void) + + void __init xen_pt_check_e820(void) + { +- if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) { +- xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n"); +- BUG(); +- } ++ xen_chk_is_e820_usable(xen_pt_base, xen_pt_size, "page table"); + } + + static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss; +diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c +index 4c2bf989edafcb..11b5c042d4faef 100644 +--- a/arch/x86/xen/p2m.c ++++ b/arch/x86/xen/p2m.c +@@ -70,6 +70,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -80,6 +81,7 @@ + #include + #include + #include 
++#include + + #include "multicalls.h" + #include "xen-ops.h" +@@ -794,6 +796,102 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, + return ret; + } + ++/* Remapped non-RAM areas */ ++#define NR_NONRAM_REMAP 4 ++static struct nonram_remap { ++ phys_addr_t maddr; ++ phys_addr_t paddr; ++ size_t size; ++} xen_nonram_remap[NR_NONRAM_REMAP] __ro_after_init; ++static unsigned int nr_nonram_remap __ro_after_init; ++ ++/* ++ * Do the real remapping of non-RAM regions as specified in the ++ * xen_nonram_remap[] array. ++ * In case of an error just crash the system. ++ */ ++void __init xen_do_remap_nonram(void) ++{ ++ unsigned int i; ++ unsigned int remapped = 0; ++ const struct nonram_remap *remap = xen_nonram_remap; ++ unsigned long pfn, mfn, end_pfn; ++ ++ for (i = 0; i < nr_nonram_remap; i++) { ++ end_pfn = PFN_UP(remap->paddr + remap->size); ++ pfn = PFN_DOWN(remap->paddr); ++ mfn = PFN_DOWN(remap->maddr); ++ while (pfn < end_pfn) { ++ if (!set_phys_to_machine(pfn, mfn)) ++ panic("Failed to set p2m mapping for pfn=%lx mfn=%lx\n", ++ pfn, mfn); ++ ++ pfn++; ++ mfn++; ++ remapped++; ++ } ++ ++ remap++; ++ } ++ ++ pr_info("Remapped %u non-RAM page(s)\n", remapped); ++} ++ ++#ifdef CONFIG_ACPI ++/* ++ * Xen variant of acpi_os_ioremap() taking potentially remapped non-RAM ++ * regions into account. ++ * Any attempt to map an area crossing a remap boundary will produce a ++ * WARN() splat. ++ * phys is related to remap->maddr on input and will be rebased to remap->paddr. ++ */ ++static void __iomem *xen_acpi_os_ioremap(acpi_physical_address phys, ++ acpi_size size) ++{ ++ unsigned int i; ++ const struct nonram_remap *remap = xen_nonram_remap; ++ ++ for (i = 0; i < nr_nonram_remap; i++) { ++ if (phys + size > remap->maddr && ++ phys < remap->maddr + remap->size) { ++ WARN_ON(phys < remap->maddr || ++ phys + size > remap->maddr + remap->size); ++ phys += remap->paddr - remap->maddr; ++ break; ++ } ++ } ++ ++ return x86_acpi_os_ioremap(phys, size); ++} ++#endif /* CONFIG_ACPI */ ++ ++/* ++ * Add a new non-RAM remap entry. ++ * In case of no free entry found, just crash the system. ++ */ ++void __init xen_add_remap_nonram(phys_addr_t maddr, phys_addr_t paddr, ++ unsigned long size) ++{ ++ BUG_ON((maddr & ~PAGE_MASK) != (paddr & ~PAGE_MASK)); ++ ++ if (nr_nonram_remap == NR_NONRAM_REMAP) { ++ xen_raw_console_write("Number of required E820 entry remapping actions exceed maximum value\n"); ++ BUG(); ++ } ++ ++#ifdef CONFIG_ACPI ++ /* Switch to the Xen acpi_os_ioremap() variant. */ ++ if (nr_nonram_remap == 0) ++ acpi_os_ioremap = xen_acpi_os_ioremap; ++#endif ++ ++ xen_nonram_remap[nr_nonram_remap].maddr = maddr; ++ xen_nonram_remap[nr_nonram_remap].paddr = paddr; ++ xen_nonram_remap[nr_nonram_remap].size = size; ++ ++ nr_nonram_remap++; ++} ++ + #ifdef CONFIG_XEN_DEBUG_FS + #include + #include "debugfs.h" +diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c +index 380591028cb8f4..dc822124cacb9c 100644 +--- a/arch/x86/xen/setup.c ++++ b/arch/x86/xen/setup.c +@@ -15,12 +15,12 @@ + #include + #include + #include ++#include + + #include + #include + #include + #include +-#include + #include + #include + #include +@@ -47,6 +47,9 @@ bool xen_pv_pci_possible; + /* E820 map used during setting up memory. */ + static struct e820_table xen_e820_table __initdata; + ++/* Number of initially usable memory pages. */ ++static unsigned long ini_nr_pages __initdata; ++ + /* + * Buffer used to remap identity mapped pages. We only need the virtual space. 
+ * The physical page behind this address is remapped as needed to different +@@ -213,7 +216,7 @@ static int __init xen_free_mfn(unsigned long mfn) + * as a fallback if the remapping fails. + */ + static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn, +- unsigned long end_pfn, unsigned long nr_pages) ++ unsigned long end_pfn) + { + unsigned long pfn, end; + int ret; +@@ -221,7 +224,7 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn, + WARN_ON(start_pfn > end_pfn); + + /* Release pages first. */ +- end = min(end_pfn, nr_pages); ++ end = min(end_pfn, ini_nr_pages); + for (pfn = start_pfn; pfn < end; pfn++) { + unsigned long mfn = pfn_to_mfn(pfn); + +@@ -342,15 +345,14 @@ static void __init xen_do_set_identity_and_remap_chunk( + * to Xen and not remapped. + */ + static unsigned long __init xen_set_identity_and_remap_chunk( +- unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages, +- unsigned long remap_pfn) ++ unsigned long start_pfn, unsigned long end_pfn, unsigned long remap_pfn) + { + unsigned long pfn; + unsigned long i = 0; + unsigned long n = end_pfn - start_pfn; + + if (remap_pfn == 0) +- remap_pfn = nr_pages; ++ remap_pfn = ini_nr_pages; + + while (i < n) { + unsigned long cur_pfn = start_pfn + i; +@@ -359,19 +361,19 @@ static unsigned long __init xen_set_identity_and_remap_chunk( + unsigned long remap_range_size; + + /* Do not remap pages beyond the current allocation */ +- if (cur_pfn >= nr_pages) { ++ if (cur_pfn >= ini_nr_pages) { + /* Identity map remaining pages */ + set_phys_range_identity(cur_pfn, cur_pfn + size); + break; + } +- if (cur_pfn + size > nr_pages) +- size = nr_pages - cur_pfn; ++ if (cur_pfn + size > ini_nr_pages) ++ size = ini_nr_pages - cur_pfn; + + remap_range_size = xen_find_pfn_range(&remap_pfn); + if (!remap_range_size) { + pr_warn("Unable to find available pfn range, not remapping identity pages\n"); + xen_set_identity_and_release_chunk(cur_pfn, +- cur_pfn + left, nr_pages); ++ cur_pfn + left); + break; + } + /* Adjust size to fit in current e820 RAM region */ +@@ -398,18 +400,18 @@ static unsigned long __init xen_set_identity_and_remap_chunk( + } + + static unsigned long __init xen_count_remap_pages( +- unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages, ++ unsigned long start_pfn, unsigned long end_pfn, + unsigned long remap_pages) + { +- if (start_pfn >= nr_pages) ++ if (start_pfn >= ini_nr_pages) + return remap_pages; + +- return remap_pages + min(end_pfn, nr_pages) - start_pfn; ++ return remap_pages + min(end_pfn, ini_nr_pages) - start_pfn; + } + +-static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages, ++static unsigned long __init xen_foreach_remap_area( + unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn, +- unsigned long nr_pages, unsigned long last_val)) ++ unsigned long last_val)) + { + phys_addr_t start = 0; + unsigned long ret_val = 0; +@@ -437,8 +439,7 @@ static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages, + end_pfn = PFN_UP(entry->addr); + + if (start_pfn < end_pfn) +- ret_val = func(start_pfn, end_pfn, nr_pages, +- ret_val); ++ ret_val = func(start_pfn, end_pfn, ret_val); + start = end; + } + } +@@ -495,6 +496,8 @@ void __init xen_remap_memory(void) + set_pte_mfn(buf, mfn_save, PAGE_KERNEL); + + pr_info("Remapped %ld page(s)\n", remapped); ++ ++ xen_do_remap_nonram(); + } + + static unsigned long __init xen_get_pages_limit(void) +@@ -568,7 +571,7 @@ static void __init 
xen_ignore_unusable(void)
+ }
+ }
+
+-bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
++static bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
+ {
+ struct e820_entry *entry;
+ unsigned mapcnt;
+@@ -625,6 +628,111 @@ phys_addr_t __init xen_find_free_area(phys_addr_t size)
+ return 0;
+ }
+
++/*
++ * Swap a non-RAM E820 map entry with RAM above ini_nr_pages.
++ * Note that the E820 map is modified accordingly, but the P2M map isn't yet.
++ * The adaptation of the P2M must be deferred until page allocation is possible.
++ */
++static void __init xen_e820_swap_entry_with_ram(struct e820_entry *swap_entry)
++{
++ struct e820_entry *entry;
++ unsigned int mapcnt;
++ phys_addr_t mem_end = PFN_PHYS(ini_nr_pages);
++ phys_addr_t swap_addr, swap_size, entry_end;
++
++ swap_addr = PAGE_ALIGN_DOWN(swap_entry->addr);
++ swap_size = PAGE_ALIGN(swap_entry->addr - swap_addr + swap_entry->size);
++ entry = xen_e820_table.entries;
++
++ for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
++ entry_end = entry->addr + entry->size;
++ if (entry->type == E820_TYPE_RAM && entry->size >= swap_size &&
++ entry_end - swap_size >= mem_end) {
++ /* Reduce RAM entry by needed space (whole pages). */
++ entry->size -= swap_size;
++
++ /* Add new entry at the end of E820 map. */
++ entry = xen_e820_table.entries +
++ xen_e820_table.nr_entries;
++ xen_e820_table.nr_entries++;
++
++ /* Fill new entry (keep size and page offset). */
++ entry->type = swap_entry->type;
++ entry->addr = entry_end - swap_size +
++ swap_addr - swap_entry->addr;
++ entry->size = swap_entry->size;
++
++ /* Convert old entry to RAM, align to pages. */
++ swap_entry->type = E820_TYPE_RAM;
++ swap_entry->addr = swap_addr;
++ swap_entry->size = swap_size;
++
++ /* Remember PFN<->MFN relation for P2M update. */
++ xen_add_remap_nonram(swap_addr, entry_end - swap_size,
++ swap_size);
++
++ /* Order E820 table and merge entries. */
++ e820__update_table(&xen_e820_table);
++
++ return;
++ }
++
++ entry++;
++ }
++
++ xen_raw_console_write("No suitable area found for required E820 entry remapping action\n");
++ BUG();
++}
++
++/*
++ * Look for non-RAM memory types in a specific guest physical area and move
++ * those away if possible (ACPI NVS only for now).
++ */
++static void __init xen_e820_resolve_conflicts(phys_addr_t start,
++ phys_addr_t size)
++{
++ struct e820_entry *entry;
++ unsigned int mapcnt;
++ phys_addr_t end;
++
++ if (!size)
++ return;
++
++ end = start + size;
++ entry = xen_e820_table.entries;
++
++ for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
++ if (entry->addr >= end)
++ return;
++
++ if (entry->addr + entry->size > start &&
++ entry->type == E820_TYPE_NVS)
++ xen_e820_swap_entry_with_ram(entry);
++
++ entry++;
++ }
++}
++
++/*
++ * Check for an area in physical memory to be usable for non-movable purposes.
++ * An area is considered usable if the used E820 map lists it to be RAM or
++ * some other type which can be moved to higher PFNs while keeping the MFNs.
++ * In case the area is not usable, crash the system with an error message.
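/*
 * A standalone sketch of the swap arithmetic used by
 * xen_e820_swap_entry_with_ram() above; all addresses here are invented for
 * illustration, not taken from a real E820 map.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 0x1000ULL
#define PAGE_ALIGN_DOWN(x) ((x) & ~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	/* Hypothetical ACPI NVS entry that starts mid-page. */
	uint64_t addr = 0x7f245800, size = 0x2000;
	/* End of a hypothetical RAM entry large enough to donate space. */
	uint64_t entry_end = 0x100000000;

	uint64_t swap_addr = PAGE_ALIGN_DOWN(addr);               /* 0x7f245000 */
	uint64_t swap_size = PAGE_ALIGN(addr - swap_addr + size); /* 0x3000 */
	/* The new location keeps the in-page offset, as the patch comments say. */
	uint64_t new_addr = entry_end - swap_size + (addr - swap_addr);

	printf("moved to %#llx (in-page offset %#llx preserved)\n",
	       (unsigned long long)new_addr,
	       (unsigned long long)(addr - swap_addr));
	return 0;
}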
++ */ ++void __init xen_chk_is_e820_usable(phys_addr_t start, phys_addr_t size, ++ const char *component) ++{ ++ xen_e820_resolve_conflicts(start, size); ++ ++ if (!xen_is_e820_reserved(start, size)) ++ return; ++ ++ xen_raw_console_write("Xen hypervisor allocated "); ++ xen_raw_console_write(component); ++ xen_raw_console_write(" memory conflicts with E820 map\n"); ++ BUG(); ++} ++ + /* + * Like memcpy, but with physical addresses for dest and src. + */ +@@ -684,20 +792,20 @@ static void __init xen_reserve_xen_mfnlist(void) + **/ + char * __init xen_memory_setup(void) + { +- unsigned long max_pfn, pfn_s, n_pfns; ++ unsigned long pfn_s, n_pfns; + phys_addr_t mem_end, addr, size, chunk_size; + u32 type; + int rc; + struct xen_memory_map memmap; + unsigned long max_pages; + unsigned long extra_pages = 0; ++ unsigned long maxmem_pages; + int i; + int op; + + xen_parse_512gb(); +- max_pfn = xen_get_pages_limit(); +- max_pfn = min(max_pfn, xen_start_info->nr_pages); +- mem_end = PFN_PHYS(max_pfn); ++ ini_nr_pages = min(xen_get_pages_limit(), xen_start_info->nr_pages); ++ mem_end = PFN_PHYS(ini_nr_pages); + + memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries); + set_xen_guest_handle(memmap.buffer, xen_e820_table.entries); +@@ -747,13 +855,35 @@ char * __init xen_memory_setup(void) + /* Make sure the Xen-supplied memory map is well-ordered. */ + e820__update_table(&xen_e820_table); + ++ /* ++ * Check whether the kernel itself conflicts with the target E820 map. ++ * Failing now is better than running into weird problems later due ++ * to relocating (and even reusing) pages with kernel text or data. ++ */ ++ xen_chk_is_e820_usable(__pa_symbol(_text), ++ __pa_symbol(_end) - __pa_symbol(_text), ++ "kernel"); ++ ++ /* ++ * Check for a conflict of the xen_start_info memory with the target ++ * E820 map. ++ */ ++ xen_chk_is_e820_usable(__pa(xen_start_info), sizeof(*xen_start_info), ++ "xen_start_info"); ++ ++ /* ++ * Check for a conflict of the hypervisor supplied page tables with ++ * the target E820 map. ++ */ ++ xen_pt_check_e820(); ++ + max_pages = xen_get_max_pages(); + + /* How many extra pages do we need due to remapping? */ +- max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages); ++ max_pages += xen_foreach_remap_area(xen_count_remap_pages); + +- if (max_pages > max_pfn) +- extra_pages += max_pages - max_pfn; ++ if (max_pages > ini_nr_pages) ++ extra_pages += max_pages - ini_nr_pages; + + /* + * Clamp the amount of extra memory to a EXTRA_MEM_RATIO +@@ -762,8 +892,8 @@ char * __init xen_memory_setup(void) + * Make sure we have no memory above max_pages, as this area + * isn't handled by the p2m management. + */ +- extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)), +- extra_pages, max_pages - max_pfn); ++ maxmem_pages = EXTRA_MEM_RATIO * min(ini_nr_pages, PFN_DOWN(MAXMEM)); ++ extra_pages = min3(maxmem_pages, extra_pages, max_pages - ini_nr_pages); + i = 0; + addr = xen_e820_table.entries[0].addr; + size = xen_e820_table.entries[0].size; +@@ -819,23 +949,6 @@ char * __init xen_memory_setup(void) + + e820__update_table(e820_table); + +- /* +- * Check whether the kernel itself conflicts with the target E820 map. +- * Failing now is better than running into weird problems later due +- * to relocating (and even reusing) pages with kernel text or data. 
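/*
 * A small numeric illustration (invented numbers) of the extra_pages clamp in
 * xen_memory_setup() above: extra memory is limited both by the EXTRA_MEM_RATIO
 * multiple of the initial allocation (the ratio is 10 in this file) and by the
 * gap between the initial and maximum page counts.
 */
#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))
#define min3(a, b, c) min(min(a, b), (c))

int main(void)
{
	unsigned long ini_nr_pages = 0x40000;           /* 1 GiB of initial pages */
	unsigned long max_pages = 0x80000;              /* 2 GiB allowed by Xen */
	unsigned long extra_pages = max_pages - ini_nr_pages;
	unsigned long maxmem_pages = 10 * ini_nr_pages; /* EXTRA_MEM_RATIO bound */

	printf("%lu extra pages\n",
	       min3(maxmem_pages, extra_pages, max_pages - ini_nr_pages));
	return 0;
}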
+- */ +- if (xen_is_e820_reserved(__pa_symbol(_text), +- __pa_symbol(__bss_stop) - __pa_symbol(_text))) { +- xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n"); +- BUG(); +- } +- +- /* +- * Check for a conflict of the hypervisor supplied page tables with +- * the target E820 map. +- */ +- xen_pt_check_e820(); +- + xen_reserve_xen_mfnlist(); + + /* Check for a conflict of the initrd with the target E820 map. */ +@@ -863,7 +976,7 @@ char * __init xen_memory_setup(void) + * Set identity map on non-RAM pages and prepare remapping the + * underlying RAM. + */ +- xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk); ++ xen_foreach_remap_area(xen_set_identity_and_remap_chunk); + + pr_info("Released %ld page(s)\n", xen_released_pages); + +diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h +index 79cf93f2c92f1d..a6a21dd0552700 100644 +--- a/arch/x86/xen/xen-ops.h ++++ b/arch/x86/xen/xen-ops.h +@@ -43,8 +43,12 @@ void xen_mm_unpin_all(void); + #ifdef CONFIG_X86_64 + void __init xen_relocate_p2m(void); + #endif ++void __init xen_do_remap_nonram(void); ++void __init xen_add_remap_nonram(phys_addr_t maddr, phys_addr_t paddr, ++ unsigned long size); + +-bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size); ++void __init xen_chk_is_e820_usable(phys_addr_t start, phys_addr_t size, ++ const char *component); + unsigned long __ref xen_chk_extra_mem(unsigned long pfn); + void __init xen_inv_extra_mem(void); + void __init xen_remap_memory(void); +diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c +index 3cce6de464a7b7..7e0dcded5713a0 100644 +--- a/block/bfq-iosched.c ++++ b/block/bfq-iosched.c +@@ -2911,8 +2911,12 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, + struct bfq_iocq_bfqq_data *bfqq_data = &bic->bfqq_data[a_idx]; + + /* if a merge has already been setup, then proceed with that first */ +- if (bfqq->new_bfqq) +- return bfqq->new_bfqq; ++ new_bfqq = bfqq->new_bfqq; ++ if (new_bfqq) { ++ while (new_bfqq->new_bfqq) ++ new_bfqq = new_bfqq->new_bfqq; ++ return new_bfqq; ++ } + + /* + * Check delayed stable merge for rotational or non-queueing +@@ -3125,10 +3129,12 @@ void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq) + bfq_put_queue(bfqq); + } + +-static void +-bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, +- struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) ++static struct bfq_queue *bfq_merge_bfqqs(struct bfq_data *bfqd, ++ struct bfq_io_cq *bic, ++ struct bfq_queue *bfqq) + { ++ struct bfq_queue *new_bfqq = bfqq->new_bfqq; ++ + bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu", + (unsigned long)new_bfqq->pid); + /* Save weight raising and idle window of the merged queues */ +@@ -3222,6 +3228,8 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, + bfq_reassign_last_bfqq(bfqq, new_bfqq); + + bfq_release_process_ref(bfqd, bfqq); ++ ++ return new_bfqq; + } + + static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq, +@@ -3257,14 +3265,8 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq, + * fulfilled, i.e., bic can be redirected to new_bfqq + * and bfqq can be put. + */ +- bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq, +- new_bfqq); +- /* +- * If we get here, bio will be queued into new_queue, +- * so use new_bfqq to decide whether bio and rq can be +- * merged. 
+- */
+- bfqq = new_bfqq;
+
+ /*
+ * Change also bqfd->bio_bfqq, as
+@@ -5699,9 +5701,7 @@ bfq_do_early_stable_merge(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ * state before killing it.
+ */
+ bfqq->bic = bic;
+- bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq);
+-
+- return new_bfqq;
++ return bfq_merge_bfqqs(bfqd, bic, bfqq);
+ }
+
+ /*
+@@ -6156,6 +6156,7 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+ bool waiting, idle_timer_disabled = false;
+
+ if (new_bfqq) {
++ struct bfq_queue *old_bfqq = bfqq;
+ /*
+ * Release the request's reference to the old bfqq
+ * and make sure one is taken to the shared queue.
+@@ -6172,18 +6173,18 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+ * new_bfqq.
+ */
+ if (bic_to_bfqq(RQ_BIC(rq), true,
+- bfq_actuator_index(bfqd, rq->bio)) == bfqq)
+- bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
+- bfqq, new_bfqq);
++ bfq_actuator_index(bfqd, rq->bio)) == bfqq) {
++ while (bfqq != new_bfqq)
++ bfqq = bfq_merge_bfqqs(bfqd, RQ_BIC(rq), bfqq);
++ }
+
+- bfq_clear_bfqq_just_created(bfqq);
++ bfq_clear_bfqq_just_created(old_bfqq);
+ /*
+ * rq is about to be enqueued into new_bfqq,
+ * release rq reference on bfqq
+ */
+- bfq_put_queue(bfqq);
++ bfq_put_queue(old_bfqq);
+ rq->elv.priv[1] = new_bfqq;
+- bfqq = new_bfqq;
+ }
+
+ bfq_update_io_thinktime(bfqd, bfqq);
+@@ -6721,7 +6722,7 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
+ {
+ bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
+
+- if (bfqq_process_refs(bfqq) == 1) {
++ if (bfqq_process_refs(bfqq) == 1 && !bfqq->new_bfqq) {
+ bfqq->pid = current->pid;
+ bfq_clear_bfqq_coop(bfqq);
+ bfq_clear_bfqq_split_coop(bfqq);
+@@ -6819,6 +6820,31 @@ static void bfq_prepare_request(struct request *rq)
+ rq->elv.priv[0] = rq->elv.priv[1] = NULL;
+ }
+
++static struct bfq_queue *bfq_waker_bfqq(struct bfq_queue *bfqq)
++{
++ struct bfq_queue *new_bfqq = bfqq->new_bfqq;
++ struct bfq_queue *waker_bfqq = bfqq->waker_bfqq;
++
++ if (!waker_bfqq)
++ return NULL;
++
++ while (new_bfqq) {
++ if (new_bfqq == waker_bfqq) {
++ /*
++ * If waker_bfqq is in the merge chain, and current
++ * is the only process.
++ */
++ if (bfqq_process_refs(waker_bfqq) == 1)
++ return NULL;
++ break;
++ }
++
++ new_bfqq = new_bfqq->new_bfqq;
++ }
++
++ return waker_bfqq;
++}
++
+ /*
+ * If needed, init rq, allocate bfq data structures associated with
+ * rq, and increment reference counters in the destination bfq_queue
+@@ -6880,7 +6906,7 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
+ /* If the queue was seeky for too long, break it apart. */
+ if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq) &&
+ !bic->bfqq_data[a_idx].stably_merged) {
+- struct bfq_queue *old_bfqq = bfqq;
++ struct bfq_queue *waker_bfqq = bfq_waker_bfqq(bfqq);
+
+ /* Update bic before losing reference to bfqq */
+ if (bfq_bfqq_in_large_burst(bfqq))
+@@ -6900,7 +6926,7 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
+ bfqq_already_existing = true;
+
+ if (!bfqq_already_existing) {
+- bfqq->waker_bfqq = old_bfqq->waker_bfqq;
++ bfqq->waker_bfqq = waker_bfqq;
+ bfqq->tentative_waker_bfqq = NULL;
+
+ /*
+@@ -6910,7 +6936,7 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
+ * woken_list of the waker. See
+ * bfq_check_waker for details.
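/*
 * A toy model (invented types, not bfq internals) of the merge-chain walk the
 * bfq hunks above introduce: always follow new_bfqq to the tail of the chain,
 * so a queue is never merged into an intermediate, possibly stale, queue.
 */
#include <stdio.h>
#include <stddef.h>

struct queue { int id; struct queue *new_q; };

static struct queue *chain_tail(struct queue *q)
{
	while (q->new_q)	/* mirrors: while (new_bfqq->new_bfqq) ... */
		q = q->new_q;
	return q;
}

int main(void)
{
	struct queue c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

	printf("merge target: %d\n", chain_tail(&a)->id);	/* 3, not 2 */
	return 0;
}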
+ */ +- if (bfqq->waker_bfqq) ++ if (waker_bfqq) + hlist_add_head(&bfqq->woken_list_node, + &bfqq->waker_bfqq->woken_list); + } +@@ -6932,7 +6958,8 @@ static struct bfq_queue *bfq_init_rq(struct request *rq) + * addition, if the queue has also just been split, we have to + * resume its state. + */ +- if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) { ++ if (likely(bfqq != &bfqd->oom_bfqq) && !bfqq->new_bfqq && ++ bfqq_process_refs(bfqq) == 1) { + bfqq->bic = bic; + if (split) { + /* +diff --git a/block/partitions/core.c b/block/partitions/core.c +index 962e4b57d64abe..fc0ab5d8ab705b 100644 +--- a/block/partitions/core.c ++++ b/block/partitions/core.c +@@ -574,9 +574,11 @@ static bool blk_add_partition(struct gendisk *disk, + + part = add_partition(disk, p, from, size, state->parts[p].flags, + &state->parts[p].info); +- if (IS_ERR(part) && PTR_ERR(part) != -ENXIO) { +- printk(KERN_ERR " %s: p%d could not be added: %ld\n", +- disk->disk_name, p, -PTR_ERR(part)); ++ if (IS_ERR(part)) { ++ if (PTR_ERR(part) != -ENXIO) { ++ printk(KERN_ERR " %s: p%d could not be added: %pe\n", ++ disk->disk_name, p, part); ++ } + return true; + } + +diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c +index a5da8ccd353ef7..43af5fa510c09f 100644 +--- a/crypto/asymmetric_keys/asymmetric_type.c ++++ b/crypto/asymmetric_keys/asymmetric_type.c +@@ -60,17 +60,18 @@ struct key *find_asymmetric_key(struct key *keyring, + char *req, *p; + int len; + +- WARN_ON(!id_0 && !id_1 && !id_2); +- + if (id_0) { + lookup = id_0->data; + len = id_0->len; + } else if (id_1) { + lookup = id_1->data; + len = id_1->len; +- } else { ++ } else if (id_2) { + lookup = id_2->data; + len = id_2->len; ++ } else { ++ WARN_ON(1); ++ return ERR_PTR(-EINVAL); + } + + /* Construct an identifier "id:". */ +diff --git a/crypto/xor.c b/crypto/xor.c +index 8e72e5d5db0ded..56aa3169e87171 100644 +--- a/crypto/xor.c ++++ b/crypto/xor.c +@@ -83,33 +83,30 @@ static void __init + do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2) + { + int speed; +- int i, j; +- ktime_t min, start, diff; ++ unsigned long reps; ++ ktime_t min, start, t0; + + tmpl->next = template_list; + template_list = tmpl; + + preempt_disable(); + +- min = (ktime_t)S64_MAX; +- for (i = 0; i < 3; i++) { +- start = ktime_get(); +- for (j = 0; j < REPS; j++) { +- mb(); /* prevent loop optimization */ +- tmpl->do_2(BENCH_SIZE, b1, b2); +- mb(); +- } +- diff = ktime_sub(ktime_get(), start); +- if (diff < min) +- min = diff; +- } ++ reps = 0; ++ t0 = ktime_get(); ++ /* delay start until time has advanced */ ++ while ((start = ktime_get()) == t0) ++ cpu_relax(); ++ do { ++ mb(); /* prevent loop optimization */ ++ tmpl->do_2(BENCH_SIZE, b1, b2); ++ mb(); ++ } while (reps++ < REPS || (t0 = ktime_get()) == start); ++ min = ktime_sub(t0, start); + + preempt_enable(); + + // bytes/ns == GB/s, multiply by 1000 to get MB/s [not MiB/s] +- if (!min) +- min = 1; +- speed = (1000 * REPS * BENCH_SIZE) / (unsigned int)ktime_to_ns(min); ++ speed = (1000 * reps * BENCH_SIZE) / (unsigned int)ktime_to_ns(min); + tmpl->speed = speed; + + pr_info(" %-16s: %5d MB/sec\n", tmpl->name, speed); +diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c +index d6934ba7a3154e..28217a995f795c 100644 +--- a/drivers/acpi/cppc_acpi.c ++++ b/drivers/acpi/cppc_acpi.c +@@ -167,8 +167,11 @@ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time); + #define GET_BIT_WIDTH(reg) ((reg)->access_width ? 
(8 << ((reg)->access_width - 1)) : (reg)->bit_width)
+
+ /* Shift and apply the mask for CPC reads/writes */
+-#define MASK_VAL(reg, val) (((val) >> (reg)->bit_offset) & \
++#define MASK_VAL_READ(reg, val) (((val) >> (reg)->bit_offset) & \
+ GENMASK(((reg)->bit_width) - 1, 0))
++#define MASK_VAL_WRITE(reg, prev_val, val) \
++ ((((val) & GENMASK(((reg)->bit_width) - 1, 0)) << (reg)->bit_offset) | \
++ ((prev_val) & ~(GENMASK(((reg)->bit_width) - 1, 0) << (reg)->bit_offset))) \
+
+ static ssize_t show_feedback_ctrs(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+@@ -852,6 +855,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
+
+ /* Store CPU Logical ID */
+ cpc_ptr->cpu_id = pr->id;
++ spin_lock_init(&cpc_ptr->rmw_lock);
+
+ /* Parse PSD data for this CPU */
+ ret = acpi_get_psd(cpc_ptr, handle);
+@@ -1057,7 +1061,7 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
+ }
+
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+- *val = MASK_VAL(reg, *val);
++ *val = MASK_VAL_READ(reg, *val);
+
+ return 0;
+ }
+@@ -1066,9 +1070,11 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
+ {
+ int ret_val = 0;
+ int size;
++ u64 prev_val;
+ void __iomem *vaddr = NULL;
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
+ struct cpc_reg *reg = &reg_res->cpc_entry.reg;
++ struct cpc_desc *cpc_desc;
+
+ size = GET_BIT_WIDTH(reg);
+
+@@ -1101,8 +1107,33 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
+ return acpi_os_write_memory((acpi_physical_address)reg->address,
+ val, size);
+
+- if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+- val = MASK_VAL(reg, val);
++ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
++ cpc_desc = per_cpu(cpc_desc_ptr, cpu);
++ if (!cpc_desc) {
++ pr_debug("No CPC descriptor for CPU:%d\n", cpu);
++ return -ENODEV;
++ }
++
++ spin_lock(&cpc_desc->rmw_lock);
++ switch (size) {
++ case 8:
++ prev_val = readb_relaxed(vaddr);
++ break;
++ case 16:
++ prev_val = readw_relaxed(vaddr);
++ break;
++ case 32:
++ prev_val = readl_relaxed(vaddr);
++ break;
++ case 64:
++ prev_val = readq_relaxed(vaddr);
++ break;
++ default:
++ spin_unlock(&cpc_desc->rmw_lock);
++ return -EFAULT;
++ }
++ val = MASK_VAL_WRITE(reg, prev_val, val);
++ }
+
+ switch (size) {
+ case 8:
+@@ -1129,6 +1160,9 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
+ break;
+ }
+
++ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
++ spin_unlock(&cpc_desc->rmw_lock);
++
+ return ret_val;
+ }
+
+diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
+index a34d8578b3da6c..6ed5e9e56be2f4 100644
+--- a/drivers/acpi/device_sysfs.c
++++ b/drivers/acpi/device_sysfs.c
+@@ -544,8 +544,9 @@ int acpi_device_setup_files(struct acpi_device *dev)
+ * If device has _STR, 'description' file is created
+ */
+ if (acpi_has_method(dev->handle, "_STR")) {
+- status = acpi_evaluate_object(dev->handle, "_STR",
+- NULL, &buffer);
++ status = acpi_evaluate_object_typed(dev->handle, "_STR",
++ NULL, &buffer,
++ ACPI_TYPE_BUFFER);
+ if (ACPI_FAILURE(status))
+ buffer.pointer = NULL;
+ dev->pnp.str_obj = buffer.pointer;
+diff --git a/drivers/acpi/pmic/tps68470_pmic.c b/drivers/acpi/pmic/tps68470_pmic.c
+index ebd03e4729555a..0d1a82eeb4b0b6 100644
+--- a/drivers/acpi/pmic/tps68470_pmic.c
++++ b/drivers/acpi/pmic/tps68470_pmic.c
+@@ -376,10 +376,8 @@ static int tps68470_pmic_opregion_probe(struct platform_device *pdev)
+ struct tps68470_pmic_opregion
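/*
 * A standalone demonstration (toy register layout, simplified GENMASK) of the
 * MASK_VAL_READ / MASK_VAL_WRITE macros added above: reads extract a bit
 * field, writes preserve the neighbouring bits via read-modify-write.
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

struct reg { unsigned bit_offset, bit_width; };

#define MASK_VAL_READ(r, v) (((v) >> (r)->bit_offset) & GENMASK((r)->bit_width - 1, 0))
#define MASK_VAL_WRITE(r, prev, v) \
	((((v) & GENMASK((r)->bit_width - 1, 0)) << (r)->bit_offset) | \
	 ((prev) & ~(GENMASK((r)->bit_width - 1, 0) << (r)->bit_offset)))

int main(void)
{
	struct reg r = { .bit_offset = 8, .bit_width = 8 };
	uint64_t mmio = 0xAABBCCDD;

	printf("read:  %#llx\n", (unsigned long long)MASK_VAL_READ(&r, mmio)); /* 0xcc */
	/* Writing 0x11 touches only bits 15..8: 0xaabb11dd, not 0xaabbddDD. */
	printf("write: %#llx\n", (unsigned long long)MASK_VAL_WRITE(&r, mmio, 0x11));
	/* OR-ing the previous value back in afterwards would re-set the old
	 * field bits and defeat the read-modify-write. */
	return 0;
}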
*opregion; + acpi_status status; + +- if (!dev || !tps68470_regmap) { +- dev_warn(dev, "dev or regmap is NULL\n"); +- return -EINVAL; +- } ++ if (!tps68470_regmap) ++ return dev_err_probe(dev, -EINVAL, "regmap is missing\n"); + + if (!handle) { + dev_warn(dev, "acpi handle is NULL\n"); +diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c +index 15f9d3d9c1cb81..3a13b22c8d9a54 100644 +--- a/drivers/acpi/resource.c ++++ b/drivers/acpi/resource.c +@@ -508,6 +508,12 @@ static const struct dmi_system_id maingear_laptop[] = { + DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"), + }, + }, ++ { ++ /* TongFang GMxXGxX/TUXEDO Polaris 15 Gen5 AMD */ ++ .matches = { ++ DMI_MATCH(DMI_BOARD_NAME, "GMxXGxX"), ++ }, ++ }, + { + /* TongFang GMxXGxx sold as Eluktronics Inc. RP-15 */ + .matches = { +diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c +index 1168e29cae86ec..cd87457375454a 100644 +--- a/drivers/ata/libata-eh.c ++++ b/drivers/ata/libata-eh.c +@@ -618,6 +618,14 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, + list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) { + struct ata_queued_cmd *qc; + ++ /* ++ * If the scmd was added to EH, via ata_qc_schedule_eh() -> ++ * scsi_timeout() -> scsi_eh_scmd_add(), scsi_timeout() will ++ * have set DID_TIME_OUT (since libata does not have an abort ++ * handler). Thus, to clear DID_TIME_OUT, clear the host byte. ++ */ ++ set_host_byte(scmd, DID_OK); ++ + ata_qc_for_each_raw(ap, qc, i) { + if (qc->flags & ATA_QCFLAG_ACTIVE && + qc->scsicmd == scmd) +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c +index c91f8746289f4a..5377d094bf7548 100644 +--- a/drivers/ata/libata-scsi.c ++++ b/drivers/ata/libata-scsi.c +@@ -1725,9 +1725,6 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc) + set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION); + } else if (is_error && !have_sense) { + ata_gen_ata_sense(qc); +- } else { +- /* Keep the SCSI ML and status byte, clear host byte. 
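/*
 * A small illustration (userspace, simplified constants) of the SCSI result
 * layout the libata-eh.c hunk above relies on: the host byte occupies bits
 * 23..16 of the result word, so setting it to DID_OK clears a stale
 * DID_TIME_OUT while leaving the SCSI status byte intact.
 */
#include <stdint.h>
#include <stdio.h>

#define DID_OK       0x00u
#define DID_TIME_OUT 0x03u

static uint32_t set_host_byte(uint32_t result, uint32_t host)
{
	return (result & 0xff00ffffu) | (host << 16);
}

int main(void)
{
	uint32_t result = (DID_TIME_OUT << 16) | 0x02; /* timeout + CHECK_CONDITION */

	printf("before: %#x after: %#x\n", result, set_host_byte(result, DID_OK));
	return 0;
}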
*/ +- cmd->result &= 0x0000ffff; + } + + ata_qc_done(qc); +@@ -2393,7 +2390,7 @@ static unsigned int ata_msense_control(struct ata_device *dev, u8 *buf, + case ALL_SUB_MPAGES: + n = ata_msense_control_spg0(dev, buf, changeable); + n += ata_msense_control_spgt2(dev, buf + n, CDL_T2A_SUB_MPAGE); +- n += ata_msense_control_spgt2(dev, buf + n, CDL_T2A_SUB_MPAGE); ++ n += ata_msense_control_spgt2(dev, buf + n, CDL_T2B_SUB_MPAGE); + n += ata_msense_control_ata_feature(dev, buf + n); + return n; + default: +diff --git a/drivers/base/core.c b/drivers/base/core.c +index cb323700e952f8..60a0a4630a5bb2 100644 +--- a/drivers/base/core.c ++++ b/drivers/base/core.c +@@ -4485,9 +4485,11 @@ EXPORT_SYMBOL_GPL(device_destroy); + */ + int device_rename(struct device *dev, const char *new_name) + { ++ struct subsys_private *sp = NULL; + struct kobject *kobj = &dev->kobj; + char *old_device_name = NULL; + int error; ++ bool is_link_renamed = false; + + dev = get_device(dev); + if (!dev) +@@ -4502,7 +4504,7 @@ int device_rename(struct device *dev, const char *new_name) + } + + if (dev->class) { +- struct subsys_private *sp = class_to_subsys(dev->class); ++ sp = class_to_subsys(dev->class); + + if (!sp) { + error = -EINVAL; +@@ -4511,16 +4513,19 @@ int device_rename(struct device *dev, const char *new_name) + + error = sysfs_rename_link_ns(&sp->subsys.kobj, kobj, old_device_name, + new_name, kobject_namespace(kobj)); +- subsys_put(sp); + if (error) + goto out; ++ ++ is_link_renamed = true; + } + + error = kobject_rename(kobj, new_name); +- if (error) +- goto out; +- + out: ++ if (error && is_link_renamed) ++ sysfs_rename_link_ns(&sp->subsys.kobj, kobj, new_name, ++ old_device_name, kobject_namespace(kobj)); ++ subsys_put(sp); ++ + put_device(dev); + + kfree(old_device_name); +diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c +index b58c42f1b1ce65..0b18c6b46e65d8 100644 +--- a/drivers/base/firmware_loader/main.c ++++ b/drivers/base/firmware_loader/main.c +@@ -844,6 +844,26 @@ static void fw_log_firmware_info(const struct firmware *fw, const char *name, + {} + #endif + ++/* ++ * Reject firmware file names with ".." path components. ++ * There are drivers that construct firmware file names from device-supplied ++ * strings, and we don't want some device to be able to tell us "I would like to ++ * be sent my firmware from ../../../etc/shadow, please". ++ * ++ * Search for ".." surrounded by either '/' or start/end of string. ++ * ++ * This intentionally only looks at the firmware name, not at the firmware base ++ * directory or at symlink contents. ++ */ ++static bool name_contains_dotdot(const char *name) ++{ ++ size_t name_len = strlen(name); ++ ++ return strcmp(name, "..") == 0 || strncmp(name, "../", 3) == 0 || ++ strstr(name, "/../") != NULL || ++ (name_len >= 3 && strcmp(name+name_len-3, "/..") == 0); ++} ++ + /* called from request_firmware() and request_firmware_work_func() */ + static int + _request_firmware(const struct firmware **firmware_p, const char *name, +@@ -864,6 +884,14 @@ _request_firmware(const struct firmware **firmware_p, const char *name, + goto out; + } + ++ if (name_contains_dotdot(name)) { ++ dev_warn(device, ++ "Firmware load for '%s' refused, path contains '..' 
component\n", ++ name); ++ ret = -EINVAL; ++ goto out; ++ } ++ + ret = _request_firmware_prepare(&fw, name, device, buf, size, + offset, opt_flags); + if (ret <= 0) /* error or already assigned */ +@@ -941,6 +969,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name, + * @name will be used as $FIRMWARE in the uevent environment and + * should be distinctive enough not to be confused with any other + * firmware image for this or any other device. ++ * It must not contain any ".." path components - "foo/bar..bin" is ++ * allowed, but "foo/../bar.bin" is not. + * + * Caller must hold the reference count of @device. + * +diff --git a/drivers/base/module.c b/drivers/base/module.c +index b0b79b9c189d4f..0d5c5da367f720 100644 +--- a/drivers/base/module.c ++++ b/drivers/base/module.c +@@ -66,27 +66,31 @@ int module_add_driver(struct module *mod, struct device_driver *drv) + driver_name = make_driver_name(drv); + if (!driver_name) { + ret = -ENOMEM; +- goto out; ++ goto out_remove_kobj; + } + + module_create_drivers_dir(mk); + if (!mk->drivers_dir) { + ret = -EINVAL; +- goto out; ++ goto out_free_driver_name; + } + + ret = sysfs_create_link(mk->drivers_dir, &drv->p->kobj, driver_name); + if (ret) +- goto out; ++ goto out_remove_drivers_dir; + + kfree(driver_name); + + return 0; +-out: +- sysfs_remove_link(&drv->p->kobj, "module"); ++ ++out_remove_drivers_dir: + sysfs_remove_link(mk->drivers_dir, driver_name); ++ ++out_free_driver_name: + kfree(driver_name); + ++out_remove_kobj: ++ sysfs_remove_link(&drv->p->kobj, "module"); + return ret; + } + +diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c +index 84443b6bd8825e..582564f8dde6f9 100644 +--- a/drivers/base/power/domain.c ++++ b/drivers/base/power/domain.c +@@ -3135,7 +3135,7 @@ static int genpd_summary_one(struct seq_file *s, + else + snprintf(state, sizeof(state), "%s", + status_lookup[genpd->status]); +- seq_printf(s, "%-30s %-50s %u", genpd->name, state, genpd->performance_state); ++ seq_printf(s, "%-30s %-49s %u", genpd->name, state, genpd->performance_state); + + /* + * Modifications on the list require holding locks on both +diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c +index 6bc86106c7b2ab..102cc3034412d9 100644 +--- a/drivers/block/drbd/drbd_main.c ++++ b/drivers/block/drbd/drbd_main.c +@@ -3392,10 +3392,12 @@ void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local) + void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local) + { + unsigned long flags; +- if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0) ++ spin_lock_irqsave(&device->ldev->md.uuid_lock, flags); ++ if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0) { ++ spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags); + return; ++ } + +- spin_lock_irqsave(&device->ldev->md.uuid_lock, flags); + if (val == 0) { + drbd_uuid_move_history(device); + device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP]; +diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c +index 287a8d1d3f707f..87cf5883078f5b 100644 +--- a/drivers/block/drbd/drbd_state.c ++++ b/drivers/block/drbd/drbd_state.c +@@ -876,7 +876,7 @@ is_valid_state(struct drbd_device *device, union drbd_state ns) + ns.disk == D_OUTDATED) + rv = SS_CONNECTED_OUTDATES; + +- else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && ++ else if (nc && (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && + (nc->verify_alg[0] == 0)) + rv = SS_NO_VERIFY_ALG; + +diff --git 
a/drivers/block/nbd.c b/drivers/block/nbd.c +index 1089dc646b8082..96b349148e5788 100644 +--- a/drivers/block/nbd.c ++++ b/drivers/block/nbd.c +@@ -181,6 +181,17 @@ static void nbd_requeue_cmd(struct nbd_cmd *cmd) + { + struct request *req = blk_mq_rq_from_pdu(cmd); + ++ lockdep_assert_held(&cmd->lock); ++ ++ /* ++ * Clear INFLIGHT flag so that this cmd won't be completed in ++ * normal completion path ++ * ++ * INFLIGHT flag will be set when the cmd is queued to nbd next ++ * time. ++ */ ++ __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags); ++ + if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags)) + blk_mq_requeue_request(req, true); + } +@@ -461,8 +472,8 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req) + nbd_mark_nsock_dead(nbd, nsock, 1); + mutex_unlock(&nsock->tx_lock); + } +- mutex_unlock(&cmd->lock); + nbd_requeue_cmd(cmd); ++ mutex_unlock(&cmd->lock); + nbd_config_put(nbd); + return BLK_EH_DONE; + } +diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c +index bf7f68e90953bc..9cafbce1faf388 100644 +--- a/drivers/block/ublk_drv.c ++++ b/drivers/block/ublk_drv.c +@@ -68,9 +68,6 @@ struct ublk_rq_data { + struct llist_node node; + + struct kref ref; +- __u64 sector; +- __u32 operation; +- __u32 nr_zones; + }; + + struct ublk_uring_cmd_pdu { +@@ -215,6 +212,33 @@ static inline bool ublk_queue_is_zoned(struct ublk_queue *ubq) + + #ifdef CONFIG_BLK_DEV_ZONED + ++struct ublk_zoned_report_desc { ++ __u64 sector; ++ __u32 operation; ++ __u32 nr_zones; ++}; ++ ++static DEFINE_XARRAY(ublk_zoned_report_descs); ++ ++static int ublk_zoned_insert_report_desc(const struct request *req, ++ struct ublk_zoned_report_desc *desc) ++{ ++ return xa_insert(&ublk_zoned_report_descs, (unsigned long)req, ++ desc, GFP_KERNEL); ++} ++ ++static struct ublk_zoned_report_desc *ublk_zoned_erase_report_desc( ++ const struct request *req) ++{ ++ return xa_erase(&ublk_zoned_report_descs, (unsigned long)req); ++} ++ ++static struct ublk_zoned_report_desc *ublk_zoned_get_report_desc( ++ const struct request *req) ++{ ++ return xa_load(&ublk_zoned_report_descs, (unsigned long)req); ++} ++ + static int ublk_get_nr_zones(const struct ublk_device *ub) + { + const struct ublk_param_basic *p = &ub->params.basic; +@@ -321,7 +345,7 @@ static int ublk_report_zones(struct gendisk *disk, sector_t sector, + unsigned int zones_in_request = + min_t(unsigned int, remaining_zones, max_zones_per_request); + struct request *req; +- struct ublk_rq_data *pdu; ++ struct ublk_zoned_report_desc desc; + blk_status_t status; + + memset(buffer, 0, buffer_length); +@@ -332,20 +356,23 @@ static int ublk_report_zones(struct gendisk *disk, sector_t sector, + goto out; + } + +- pdu = blk_mq_rq_to_pdu(req); +- pdu->operation = UBLK_IO_OP_REPORT_ZONES; +- pdu->sector = sector; +- pdu->nr_zones = zones_in_request; ++ desc.operation = UBLK_IO_OP_REPORT_ZONES; ++ desc.sector = sector; ++ desc.nr_zones = zones_in_request; ++ ret = ublk_zoned_insert_report_desc(req, &desc); ++ if (ret) ++ goto free_req; + + ret = blk_rq_map_kern(disk->queue, req, buffer, buffer_length, + GFP_KERNEL); +- if (ret) { +- blk_mq_free_request(req); +- goto out; +- } ++ if (ret) ++ goto erase_desc; + + status = blk_execute_rq(req, 0); + ret = blk_status_to_errno(status); ++erase_desc: ++ ublk_zoned_erase_report_desc(req); ++free_req: + blk_mq_free_request(req); + if (ret) + goto out; +@@ -379,7 +406,7 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq, + { + struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag); + struct ublk_io *io 
= &ubq->ios[req->tag]; +- struct ublk_rq_data *pdu = blk_mq_rq_to_pdu(req); ++ struct ublk_zoned_report_desc *desc; + u32 ublk_op; + + switch (req_op(req)) { +@@ -402,12 +429,15 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq, + ublk_op = UBLK_IO_OP_ZONE_RESET_ALL; + break; + case REQ_OP_DRV_IN: +- ublk_op = pdu->operation; ++ desc = ublk_zoned_get_report_desc(req); ++ if (!desc) ++ return BLK_STS_IOERR; ++ ublk_op = desc->operation; + switch (ublk_op) { + case UBLK_IO_OP_REPORT_ZONES: + iod->op_flags = ublk_op | ublk_req_build_flags(req); +- iod->nr_zones = pdu->nr_zones; +- iod->start_sector = pdu->sector; ++ iod->nr_zones = desc->nr_zones; ++ iod->start_sector = desc->sector; + return BLK_STS_OK; + default: + return BLK_STS_IOERR; +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c +index c495fceda20a2d..0a58106207b0c3 100644 +--- a/drivers/bluetooth/btusb.c ++++ b/drivers/bluetooth/btusb.c +@@ -1352,7 +1352,10 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags) + if (!urb) + return -ENOMEM; + +- size = le16_to_cpu(data->intr_ep->wMaxPacketSize); ++ /* Use maximum HCI Event size so the USB stack handles ++ * ZPL/short-transfer automatically. ++ */ ++ size = HCI_MAX_EVENT_SIZE; + + buf = kmalloc(size, mem_flags); + if (!buf) { +diff --git a/drivers/bus/arm-integrator-lm.c b/drivers/bus/arm-integrator-lm.c +index b715c8ab36e8bd..a65c79b08804f4 100644 +--- a/drivers/bus/arm-integrator-lm.c ++++ b/drivers/bus/arm-integrator-lm.c +@@ -85,6 +85,7 @@ static int integrator_ap_lm_probe(struct platform_device *pdev) + return -ENODEV; + } + map = syscon_node_to_regmap(syscon); ++ of_node_put(syscon); + if (IS_ERR(map)) { + dev_err(dev, + "could not find Integrator/AP system controller\n"); +diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c +index 08f3f039dbddcf..154841916f5652 100644 +--- a/drivers/bus/mhi/host/pci_generic.c ++++ b/drivers/bus/mhi/host/pci_generic.c +@@ -578,6 +578,15 @@ static const struct mhi_pci_dev_info mhi_telit_fn990_info = { + .mru_default = 32768, + }; + ++static const struct mhi_pci_dev_info mhi_telit_fe990a_info = { ++ .name = "telit-fe990a", ++ .config = &modem_telit_fn990_config, ++ .bar_num = MHI_PCI_DEFAULT_BAR_NUM, ++ .dma_data_width = 32, ++ .sideband_wake = false, ++ .mru_default = 32768, ++}; ++ + /* Keep the list sorted based on the PID. 
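/*
 * A minimal userspace stand-in (fixed-size table instead of an XArray) for
 * the pattern the ublk hunks earlier switch to: per-request side-band data is
 * keyed by the request pointer in a separate lookup structure rather than
 * living in the request pdu itself.
 */
#include <stdio.h>
#include <stddef.h>

struct report_desc { unsigned long sector; unsigned op; };

#define SLOTS 8
static struct { const void *key; struct report_desc *val; } tbl[SLOTS];

static int desc_insert(const void *req, struct report_desc *d)
{
	for (int i = 0; i < SLOTS; i++)
		if (!tbl[i].key) { tbl[i].key = req; tbl[i].val = d; return 0; }
	return -1;	/* table full; xa_insert() reports such failures too */
}

static struct report_desc *desc_find(const void *req, int erase)
{
	for (int i = 0; i < SLOTS; i++)
		if (tbl[i].key == req) {
			struct report_desc *d = tbl[i].val;
			if (erase)
				tbl[i].key = NULL;
			return d;
		}
	return NULL;
}

int main(void)
{
	int req;	/* any object address serves as a key */
	struct report_desc d = { .sector = 2048, .op = 1 };

	desc_insert(&req, &d);
	printf("sector=%lu\n", desc_find(&req, 1)->sector);
	printf("after erase: %p\n", (void *)desc_find(&req, 0));
	return 0;
}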
New VID should be added as the last entry */ + static const struct pci_device_id mhi_pci_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304), +@@ -595,9 +604,9 @@ static const struct pci_device_id mhi_pci_id_table[] = { + /* Telit FN990 */ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010), + .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info }, +- /* Telit FE990 */ ++ /* Telit FE990A */ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2015), +- .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info }, ++ .driver_data = (kernel_ulong_t) &mhi_telit_fe990a_info }, + { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308), + .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info }, + { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */ +diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c +index 4c08efe7f37538..57a80ec93badac 100644 +--- a/drivers/char/hw_random/bcm2835-rng.c ++++ b/drivers/char/hw_random/bcm2835-rng.c +@@ -94,8 +94,10 @@ static int bcm2835_rng_init(struct hwrng *rng) + return ret; + + ret = reset_control_reset(priv->reset); +- if (ret) ++ if (ret) { ++ clk_disable_unprepare(priv->clk); + return ret; ++ } + + if (priv->mask_interrupts) { + /* mask the interrupt */ +diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c +index 1abbff04a015a5..a55f5f2d35dff7 100644 +--- a/drivers/char/hw_random/cctrng.c ++++ b/drivers/char/hw_random/cctrng.c +@@ -624,6 +624,7 @@ static int __maybe_unused cctrng_resume(struct device *dev) + /* wait for Cryptocell reset completion */ + if (!cctrng_wait_for_reset_completion(drvdata)) { + dev_err(dev, "Cryptocell reset not completed"); ++ clk_disable_unprepare(drvdata->clk); + return -EBUSY; + } + +diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c +index aa993753ab120b..1e3048f2bb38f0 100644 +--- a/drivers/char/hw_random/mtk-rng.c ++++ b/drivers/char/hw_random/mtk-rng.c +@@ -142,7 +142,7 @@ static int mtk_rng_probe(struct platform_device *pdev) + dev_set_drvdata(&pdev->dev, priv); + pm_runtime_set_autosuspend_delay(&pdev->dev, RNG_AUTOSUSPEND_TIMEOUT); + pm_runtime_use_autosuspend(&pdev->dev); +- pm_runtime_enable(&pdev->dev); ++ devm_pm_runtime_enable(&pdev->dev); + + dev_info(&pdev->dev, "registered RNG driver\n"); + +diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c +index 30b4c288c1bbc3..c3fbbf4d3db79a 100644 +--- a/drivers/char/tpm/tpm-dev-common.c ++++ b/drivers/char/tpm/tpm-dev-common.c +@@ -47,6 +47,8 @@ static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space, + + if (!ret) + ret = tpm2_commit_space(chip, space, buf, &len); ++ else ++ tpm2_flush_space(chip); + + out_rc: + return ret ? 
ret : len; +diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c +index 363afdd4d1d306..d4d1007fe8117e 100644 +--- a/drivers/char/tpm/tpm2-space.c ++++ b/drivers/char/tpm/tpm2-space.c +@@ -166,6 +166,9 @@ void tpm2_flush_space(struct tpm_chip *chip) + struct tpm_space *space = &chip->work_space; + int i; + ++ if (!space) ++ return; ++ + for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++) + if (space->context_tbl[i] && ~space->context_tbl[i]) + tpm2_flush_context(chip, space->context_tbl[i]); +diff --git a/drivers/clk/at91/sama7g5.c b/drivers/clk/at91/sama7g5.c +index 91b5c6f1481964..4e9594714b1428 100644 +--- a/drivers/clk/at91/sama7g5.c ++++ b/drivers/clk/at91/sama7g5.c +@@ -66,6 +66,7 @@ enum pll_component_id { + PLL_COMPID_FRAC, + PLL_COMPID_DIV0, + PLL_COMPID_DIV1, ++ PLL_COMPID_MAX, + }; + + /* +@@ -165,7 +166,7 @@ static struct sama7g5_pll { + u8 t; + u8 eid; + u8 safe_div; +-} sama7g5_plls[][PLL_ID_MAX] = { ++} sama7g5_plls[][PLL_COMPID_MAX] = { + [PLL_ID_CPU] = { + [PLL_COMPID_FRAC] = { + .n = "cpupll_fracck", +@@ -1038,7 +1039,7 @@ static void __init sama7g5_pmc_setup(struct device_node *np) + sama7g5_pmc->chws[PMC_MAIN] = hw; + + for (i = 0; i < PLL_ID_MAX; i++) { +- for (j = 0; j < 3; j++) { ++ for (j = 0; j < PLL_COMPID_MAX; j++) { + struct clk_hw *parent_hw; + + if (!sama7g5_plls[i][j].n) +diff --git a/drivers/clk/imx/clk-composite-7ulp.c b/drivers/clk/imx/clk-composite-7ulp.c +index e208ddc511339e..db7f40b07d1abf 100644 +--- a/drivers/clk/imx/clk-composite-7ulp.c ++++ b/drivers/clk/imx/clk-composite-7ulp.c +@@ -14,6 +14,7 @@ + #include "../clk-fractional-divider.h" + #include "clk.h" + ++#define PCG_PR_MASK BIT(31) + #define PCG_PCS_SHIFT 24 + #define PCG_PCS_MASK 0x7 + #define PCG_CGC_SHIFT 30 +@@ -78,6 +79,12 @@ static struct clk_hw *imx_ulp_clk_hw_composite(const char *name, + struct clk_hw *hw; + u32 val; + ++ val = readl(reg); ++ if (!(val & PCG_PR_MASK)) { ++ pr_info("PCC PR is 0 for clk:%s, bypass\n", name); ++ return 0; ++ } ++ + if (mux_present) { + mux = kzalloc(sizeof(*mux), GFP_KERNEL); + if (!mux) +diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c +index 27a08c50ac1d84..ac5e9d60acb833 100644 +--- a/drivers/clk/imx/clk-composite-8m.c ++++ b/drivers/clk/imx/clk-composite-8m.c +@@ -204,6 +204,34 @@ static const struct clk_ops imx8m_clk_composite_mux_ops = { + .determine_rate = imx8m_clk_composite_mux_determine_rate, + }; + ++static int imx8m_clk_composite_gate_enable(struct clk_hw *hw) ++{ ++ struct clk_gate *gate = to_clk_gate(hw); ++ unsigned long flags; ++ u32 val; ++ ++ spin_lock_irqsave(gate->lock, flags); ++ ++ val = readl(gate->reg); ++ val |= BIT(gate->bit_idx); ++ writel(val, gate->reg); ++ ++ spin_unlock_irqrestore(gate->lock, flags); ++ ++ return 0; ++} ++ ++static void imx8m_clk_composite_gate_disable(struct clk_hw *hw) ++{ ++ /* composite clk requires the disable hook */ ++} ++ ++static const struct clk_ops imx8m_clk_composite_gate_ops = { ++ .enable = imx8m_clk_composite_gate_enable, ++ .disable = imx8m_clk_composite_gate_disable, ++ .is_enabled = clk_gate_is_enabled, ++}; ++ + struct clk_hw *__imx8m_clk_hw_composite(const char *name, + const char * const *parent_names, + int num_parents, void __iomem *reg, +@@ -217,10 +245,11 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name, + struct clk_mux *mux = NULL; + const struct clk_ops *divider_ops; + const struct clk_ops *mux_ops; ++ const struct clk_ops *gate_ops; + + mux = kzalloc(sizeof(*mux), GFP_KERNEL); + if (!mux) +- goto fail; ++ 
return ERR_CAST(hw); + + mux_hw = &mux->hw; + mux->reg = reg; +@@ -230,7 +259,7 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name, + + div = kzalloc(sizeof(*div), GFP_KERNEL); + if (!div) +- goto fail; ++ goto free_mux; + + div_hw = &div->hw; + div->reg = reg; +@@ -257,28 +286,32 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name, + div->flags = CLK_DIVIDER_ROUND_CLOSEST; + + /* skip registering the gate ops if M4 is enabled */ +- if (!mcore_booted) { +- gate = kzalloc(sizeof(*gate), GFP_KERNEL); +- if (!gate) +- goto fail; +- +- gate_hw = &gate->hw; +- gate->reg = reg; +- gate->bit_idx = PCG_CGC_SHIFT; +- gate->lock = &imx_ccm_lock; +- } ++ gate = kzalloc(sizeof(*gate), GFP_KERNEL); ++ if (!gate) ++ goto free_div; ++ ++ gate_hw = &gate->hw; ++ gate->reg = reg; ++ gate->bit_idx = PCG_CGC_SHIFT; ++ gate->lock = &imx_ccm_lock; ++ if (!mcore_booted) ++ gate_ops = &clk_gate_ops; ++ else ++ gate_ops = &imx8m_clk_composite_gate_ops; + + hw = clk_hw_register_composite(NULL, name, parent_names, num_parents, + mux_hw, mux_ops, div_hw, +- divider_ops, gate_hw, &clk_gate_ops, flags); ++ divider_ops, gate_hw, gate_ops, flags); + if (IS_ERR(hw)) +- goto fail; ++ goto free_gate; + + return hw; + +-fail: ++free_gate: + kfree(gate); ++free_div: + kfree(div); ++free_mux: + kfree(mux); + return ERR_CAST(hw); + } +diff --git a/drivers/clk/imx/clk-composite-93.c b/drivers/clk/imx/clk-composite-93.c +index 81164bdcd6cc9a..6c6c5a30f3282d 100644 +--- a/drivers/clk/imx/clk-composite-93.c ++++ b/drivers/clk/imx/clk-composite-93.c +@@ -76,6 +76,13 @@ static int imx93_clk_composite_gate_enable(struct clk_hw *hw) + + static void imx93_clk_composite_gate_disable(struct clk_hw *hw) + { ++ /* ++ * Skip disable the root clock gate if mcore enabled. ++ * The root clock may be used by the mcore. 
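/*
 * A generic sketch (toy allocations, not clk framework code) of the unwind
 * ladder the __imx8m_clk_hw_composite() hunks above switch to: each failure
 * point jumps to a label that frees exactly what was allocated so far, in
 * reverse order, instead of sharing one catch-all "fail:" label.
 */
#include <stdlib.h>

struct composite { void *mux, *div, *gate; };

static int composite_setup(struct composite *c)
{
	c->mux = malloc(16);
	if (!c->mux)
		return -1;
	c->div = malloc(16);
	if (!c->div)
		goto free_mux;
	c->gate = malloc(16);
	if (!c->gate)
		goto free_div;
	return 0;	/* the caller now owns all three allocations */

free_div:
	free(c->div);
free_mux:
	free(c->mux);
	return -1;
}

int main(void)
{
	struct composite c;

	if (composite_setup(&c) == 0) {
		free(c.gate);
		free(c.div);
		free(c.mux);
	}
	return 0;
}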
++ */ ++ if (mcore_booted) ++ return; ++ + imx93_clk_composite_gate_endisable(hw, 0); + } + +@@ -222,7 +229,7 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p + hw = clk_hw_register_composite(NULL, name, parent_names, num_parents, + mux_hw, &clk_mux_ro_ops, div_hw, + &clk_divider_ro_ops, NULL, NULL, flags); +- } else if (!mcore_booted) { ++ } else { + gate = kzalloc(sizeof(*gate), GFP_KERNEL); + if (!gate) + goto fail; +@@ -238,12 +245,6 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p + &imx93_clk_composite_divider_ops, gate_hw, + &imx93_clk_composite_gate_ops, + flags | CLK_SET_RATE_NO_REPARENT); +- } else { +- hw = clk_hw_register_composite(NULL, name, parent_names, num_parents, +- mux_hw, &imx93_clk_composite_mux_ops, div_hw, +- &imx93_clk_composite_divider_ops, NULL, +- &imx93_clk_composite_gate_ops, +- flags | CLK_SET_RATE_NO_REPARENT); + } + + if (IS_ERR(hw)) +diff --git a/drivers/clk/imx/clk-fracn-gppll.c b/drivers/clk/imx/clk-fracn-gppll.c +index 44462ab50e513c..1becba2b62d0be 100644 +--- a/drivers/clk/imx/clk-fracn-gppll.c ++++ b/drivers/clk/imx/clk-fracn-gppll.c +@@ -291,6 +291,10 @@ static int clk_fracn_gppll_prepare(struct clk_hw *hw) + if (val & POWERUP_MASK) + return 0; + ++ if (pll->flags & CLK_FRACN_GPPLL_FRACN) ++ writel_relaxed(readl_relaxed(pll->base + PLL_NUMERATOR), ++ pll->base + PLL_NUMERATOR); ++ + val |= CLKMUX_BYPASS; + writel_relaxed(val, pll->base + PLL_CTRL); + +diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c +index f9394e94f69d73..05c7a82b751f3c 100644 +--- a/drivers/clk/imx/clk-imx6ul.c ++++ b/drivers/clk/imx/clk-imx6ul.c +@@ -542,8 +542,8 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node) + + clk_set_parent(hws[IMX6UL_CLK_ENFC_SEL]->clk, hws[IMX6UL_CLK_PLL2_PFD2]->clk); + +- clk_set_parent(hws[IMX6UL_CLK_ENET1_REF_SEL]->clk, hws[IMX6UL_CLK_ENET_REF]->clk); +- clk_set_parent(hws[IMX6UL_CLK_ENET2_REF_SEL]->clk, hws[IMX6UL_CLK_ENET2_REF]->clk); ++ clk_set_parent(hws[IMX6UL_CLK_ENET1_REF_SEL]->clk, hws[IMX6UL_CLK_ENET1_REF_125M]->clk); ++ clk_set_parent(hws[IMX6UL_CLK_ENET2_REF_SEL]->clk, hws[IMX6UL_CLK_ENET2_REF_125M]->clk); + + imx_register_uart_clocks(); + } +diff --git a/drivers/clk/imx/clk-imx8mp-audiomix.c b/drivers/clk/imx/clk-imx8mp-audiomix.c +index 55ed211a5e0b18..ab2a028b3027d3 100644 +--- a/drivers/clk/imx/clk-imx8mp-audiomix.c ++++ b/drivers/clk/imx/clk-imx8mp-audiomix.c +@@ -146,6 +146,15 @@ static const struct clk_parent_data clk_imx8mp_audiomix_pll_bypass_sels[] = { + PDM_SEL, 2, 0 \ + } + ++#define CLK_GATE_PARENT(gname, cname, pname) \ ++ { \ ++ gname"_cg", \ ++ IMX8MP_CLK_AUDIOMIX_##cname, \ ++ { .fw_name = pname, .name = pname }, NULL, 1, \ ++ CLKEN0 + 4 * !!(IMX8MP_CLK_AUDIOMIX_##cname / 32), \ ++ 1, IMX8MP_CLK_AUDIOMIX_##cname % 32 \ ++ } ++ + struct clk_imx8mp_audiomix_sel { + const char *name; + int clkid; +@@ -163,14 +172,14 @@ static struct clk_imx8mp_audiomix_sel sels[] = { + CLK_GATE("earc", EARC_IPG), + CLK_GATE("ocrama", OCRAMA_IPG), + CLK_GATE("aud2htx", AUD2HTX_IPG), +- CLK_GATE("earc_phy", EARC_PHY), ++ CLK_GATE_PARENT("earc_phy", EARC_PHY, "sai_pll_out_div2"), + CLK_GATE("sdma2", SDMA2_ROOT), + CLK_GATE("sdma3", SDMA3_ROOT), + CLK_GATE("spba2", SPBA2_ROOT), + CLK_GATE("dsp", DSP_ROOT), + CLK_GATE("dspdbg", DSPDBG_ROOT), + CLK_GATE("edma", EDMA_ROOT), +- CLK_GATE("audpll", AUDPLL_ROOT), ++ CLK_GATE_PARENT("audpll", AUDPLL_ROOT, "osc_24m"), + CLK_GATE("mu2", MU2_ROOT), + CLK_GATE("mu3", MU3_ROOT), + CLK_PDM, +diff 
--git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c +index 670aa2bab3017e..e561ff7b135fb5 100644 +--- a/drivers/clk/imx/clk-imx8mp.c ++++ b/drivers/clk/imx/clk-imx8mp.c +@@ -551,8 +551,8 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) + + hws[IMX8MP_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb_root", ccm_base + 0x9080, 0, 1); + +- hws[IMX8MP_CLK_DRAM_ALT] = imx8m_clk_hw_composite("dram_alt", imx8mp_dram_alt_sels, ccm_base + 0xa000); +- hws[IMX8MP_CLK_DRAM_APB] = imx8m_clk_hw_composite_critical("dram_apb", imx8mp_dram_apb_sels, ccm_base + 0xa080); ++ hws[IMX8MP_CLK_DRAM_ALT] = imx8m_clk_hw_fw_managed_composite("dram_alt", imx8mp_dram_alt_sels, ccm_base + 0xa000); ++ hws[IMX8MP_CLK_DRAM_APB] = imx8m_clk_hw_fw_managed_composite_critical("dram_apb", imx8mp_dram_apb_sels, ccm_base + 0xa080); + hws[IMX8MP_CLK_VPU_G1] = imx8m_clk_hw_composite("vpu_g1", imx8mp_vpu_g1_sels, ccm_base + 0xa100); + hws[IMX8MP_CLK_VPU_G2] = imx8m_clk_hw_composite("vpu_g2", imx8mp_vpu_g2_sels, ccm_base + 0xa180); + hws[IMX8MP_CLK_CAN1] = imx8m_clk_hw_composite("can1", imx8mp_can1_sels, ccm_base + 0xa200); +diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c +index 245761e0189739..6d458995f3887d 100644 +--- a/drivers/clk/imx/clk-imx8qxp.c ++++ b/drivers/clk/imx/clk-imx8qxp.c +@@ -165,8 +165,8 @@ static int imx8qxp_clk_probe(struct platform_device *pdev) + imx_clk_scu("pwm_clk", IMX_SC_R_LCD_0_PWM_0, IMX_SC_PM_CLK_PER); + imx_clk_scu("elcdif_pll", IMX_SC_R_ELCDIF_PLL, IMX_SC_PM_CLK_PLL); + imx_clk_scu2("lcd_clk", lcd_sels, ARRAY_SIZE(lcd_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_PER); +- imx_clk_scu2("lcd_pxl_clk", lcd_pxl_sels, ARRAY_SIZE(lcd_pxl_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_MISC0); + imx_clk_scu("lcd_pxl_bypass_div_clk", IMX_SC_R_LCD_0, IMX_SC_PM_CLK_BYPASS); ++ imx_clk_scu2("lcd_pxl_clk", lcd_pxl_sels, ARRAY_SIZE(lcd_pxl_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_MISC0); + + /* Audio SS */ + imx_clk_scu("audio_pll0_clk", IMX_SC_R_AUDIO_PLL_0, IMX_SC_PM_CLK_PLL); +@@ -199,18 +199,18 @@ static int imx8qxp_clk_probe(struct platform_device *pdev) + imx_clk_scu("usb3_lpm_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_MISC); + + /* Display controller SS */ +- imx_clk_scu2("dc0_disp0_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0); +- imx_clk_scu2("dc0_disp1_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1); + imx_clk_scu("dc0_pll0_clk", IMX_SC_R_DC_0_PLL_0, IMX_SC_PM_CLK_PLL); + imx_clk_scu("dc0_pll1_clk", IMX_SC_R_DC_0_PLL_1, IMX_SC_PM_CLK_PLL); + imx_clk_scu("dc0_bypass0_clk", IMX_SC_R_DC_0_VIDEO0, IMX_SC_PM_CLK_BYPASS); ++ imx_clk_scu2("dc0_disp0_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0); ++ imx_clk_scu2("dc0_disp1_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1); + imx_clk_scu("dc0_bypass1_clk", IMX_SC_R_DC_0_VIDEO1, IMX_SC_PM_CLK_BYPASS); + +- imx_clk_scu2("dc1_disp0_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC0); +- imx_clk_scu2("dc1_disp1_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC1); + imx_clk_scu("dc1_pll0_clk", IMX_SC_R_DC_1_PLL_0, IMX_SC_PM_CLK_PLL); + imx_clk_scu("dc1_pll1_clk", IMX_SC_R_DC_1_PLL_1, IMX_SC_PM_CLK_PLL); + imx_clk_scu("dc1_bypass0_clk", IMX_SC_R_DC_1_VIDEO0, IMX_SC_PM_CLK_BYPASS); ++ imx_clk_scu2("dc1_disp0_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC0); ++ imx_clk_scu2("dc1_disp1_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC1); + 
imx_clk_scu("dc1_bypass1_clk", IMX_SC_R_DC_1_VIDEO1, IMX_SC_PM_CLK_BYPASS); + + /* MIPI-LVDS SS */ +diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c +index 1701cce74df796..7464cf64803fd7 100644 +--- a/drivers/clk/qcom/clk-alpha-pll.c ++++ b/drivers/clk/qcom/clk-alpha-pll.c +@@ -1757,6 +1757,58 @@ const struct clk_ops clk_alpha_pll_agera_ops = { + }; + EXPORT_SYMBOL_GPL(clk_alpha_pll_agera_ops); + ++/** ++ * clk_lucid_5lpe_pll_configure - configure the lucid 5lpe pll ++ * ++ * @pll: clk alpha pll ++ * @regmap: register map ++ * @config: configuration to apply for pll ++ */ ++void clk_lucid_5lpe_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, ++ const struct alpha_pll_config *config) ++{ ++ /* ++ * If the bootloader left the PLL enabled it's likely that there are ++ * RCGs that will lock up if we disable the PLL below. ++ */ ++ if (trion_pll_is_enabled(pll, regmap)) { ++ pr_debug("Lucid 5LPE PLL is already enabled, skipping configuration\n"); ++ return; ++ } ++ ++ clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l); ++ regmap_write(regmap, PLL_CAL_L_VAL(pll), TRION_PLL_CAL_VAL); ++ clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha); ++ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL(pll), ++ config->config_ctl_val); ++ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U(pll), ++ config->config_ctl_hi_val); ++ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U1(pll), ++ config->config_ctl_hi1_val); ++ clk_alpha_pll_write_config(regmap, PLL_USER_CTL(pll), ++ config->user_ctl_val); ++ clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U(pll), ++ config->user_ctl_hi_val); ++ clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U1(pll), ++ config->user_ctl_hi1_val); ++ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll), ++ config->test_ctl_val); ++ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll), ++ config->test_ctl_hi_val); ++ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U1(pll), ++ config->test_ctl_hi1_val); ++ ++ /* Disable PLL output */ ++ regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0); ++ ++ /* Set operation mode to OFF */ ++ regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY); ++ ++ /* Place the PLL in STANDBY mode */ ++ regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N); ++} ++EXPORT_SYMBOL_GPL(clk_lucid_5lpe_pll_configure); ++ + static int alpha_pll_lucid_5lpe_enable(struct clk_hw *hw) + { + struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); +diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h +index 903fbab9b58e9f..3fd0ef41c72c89 100644 +--- a/drivers/clk/qcom/clk-alpha-pll.h ++++ b/drivers/clk/qcom/clk-alpha-pll.h +@@ -198,6 +198,8 @@ void clk_agera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, + + void clk_zonda_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, + const struct alpha_pll_config *config); ++void clk_lucid_5lpe_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, ++ const struct alpha_pll_config *config); + void clk_lucid_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, + const struct alpha_pll_config *config); + void clk_rivian_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, +diff --git a/drivers/clk/qcom/dispcc-sm8250.c b/drivers/clk/qcom/dispcc-sm8250.c +index e17bb8b543b51b..9a9e0852c91f23 100644 +--- a/drivers/clk/qcom/dispcc-sm8250.c ++++ b/drivers/clk/qcom/dispcc-sm8250.c +@@ -1359,8 +1359,13 @@ static int disp_cc_sm8250_probe(struct platform_device 
*pdev) + disp_cc_sm8250_clocks[DISP_CC_MDSS_EDP_GTC_CLK_SRC] = NULL; + } + +- clk_lucid_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config); +- clk_lucid_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config); ++ if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8350-dispcc")) { ++ clk_lucid_5lpe_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config); ++ clk_lucid_5lpe_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config); ++ } else { ++ clk_lucid_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config); ++ clk_lucid_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config); ++ } + + /* Enable clock gating for MDP clocks */ + regmap_update_bits(regmap, 0x8000, 0x10, 0x10); +diff --git a/drivers/clk/qcom/dispcc-sm8550.c b/drivers/clk/qcom/dispcc-sm8550.c +index b9edeb2a221dcd..95b4c0548f50d6 100644 +--- a/drivers/clk/qcom/dispcc-sm8550.c ++++ b/drivers/clk/qcom/dispcc-sm8550.c +@@ -196,7 +196,7 @@ static const struct clk_parent_data disp_cc_parent_data_3[] = { + static const struct parent_map disp_cc_parent_map_4[] = { + { P_BI_TCXO, 0 }, + { P_DP0_PHY_PLL_LINK_CLK, 1 }, +- { P_DP1_PHY_PLL_VCO_DIV_CLK, 2 }, ++ { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 }, + { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 }, + { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 }, + { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 }, +@@ -213,7 +213,7 @@ static const struct clk_parent_data disp_cc_parent_data_4[] = { + + static const struct parent_map disp_cc_parent_map_5[] = { + { P_BI_TCXO, 0 }, +- { P_DSI0_PHY_PLL_OUT_BYTECLK, 4 }, ++ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 }, + { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 }, + }; + +@@ -400,7 +400,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_aux_clk_src = { + .parent_data = disp_cc_parent_data_0, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_dp_ops, ++ .ops = &clk_rcg2_ops, + }, + }; + +@@ -562,7 +562,7 @@ static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = { + .parent_data = disp_cc_parent_data_5, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_5), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_shared_ops, + }, + }; + +@@ -577,7 +577,7 @@ static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = { + .parent_data = disp_cc_parent_data_5, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_5), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_shared_ops, + }, + }; + +@@ -1611,7 +1611,7 @@ static struct gdsc mdss_gdsc = { + .name = "mdss_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, +- .flags = HW_CTRL | RETAIN_FF_ENABLE, ++ .flags = POLL_CFG_GDSCR | HW_CTRL | RETAIN_FF_ENABLE, + }; + + static struct gdsc mdss_int2_gdsc = { +@@ -1620,7 +1620,7 @@ static struct gdsc mdss_int2_gdsc = { + .name = "mdss_int2_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, +- .flags = HW_CTRL | RETAIN_FF_ENABLE, ++ .flags = POLL_CFG_GDSCR | HW_CTRL | RETAIN_FF_ENABLE, + }; + + static struct clk_regmap *disp_cc_sm8550_clocks[] = { +diff --git a/drivers/clk/qcom/gcc-ipq5332.c b/drivers/clk/qcom/gcc-ipq5332.c +index f98591148a9767..6a4877d8882946 100644 +--- a/drivers/clk/qcom/gcc-ipq5332.c ++++ b/drivers/clk/qcom/gcc-ipq5332.c +@@ -3388,6 +3388,7 @@ static struct clk_regmap *gcc_ipq5332_clocks[] = { + [GCC_QDSS_DAP_DIV_CLK_SRC] = &gcc_qdss_dap_div_clk_src.clkr, + [GCC_QDSS_ETR_USB_CLK] = &gcc_qdss_etr_usb_clk.clkr, + [GCC_QDSS_EUD_AT_CLK] = &gcc_qdss_eud_at_clk.clkr, ++ [GCC_QDSS_TSCTR_CLK_SRC] = &gcc_qdss_tsctr_clk_src.clkr, + [GCC_QPIC_AHB_CLK] = &gcc_qpic_ahb_clk.clkr, + [GCC_QPIC_CLK] = &gcc_qpic_clk.clkr, + [GCC_QPIC_IO_MACRO_CLK] = 
&gcc_qpic_io_macro_clk.clkr, +diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c +index a24a35553e1349..7343d2d7676bca 100644 +--- a/drivers/clk/rockchip/clk-rk3228.c ++++ b/drivers/clk/rockchip/clk-rk3228.c +@@ -409,7 +409,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = { + RK2928_CLKSEL_CON(29), 0, 3, DFLAGS), + DIV(0, "sclk_vop_pre", "sclk_vop_src", 0, + RK2928_CLKSEL_CON(27), 8, 8, DFLAGS), +- MUX(DCLK_VOP, "dclk_vop", mux_dclk_vop_p, 0, ++ MUX(DCLK_VOP, "dclk_vop", mux_dclk_vop_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, + RK2928_CLKSEL_CON(27), 1, 1, MFLAGS), + + FACTOR(0, "xin12m", "xin24m", 0, 1, 2), +diff --git a/drivers/clk/rockchip/clk-rk3588.c b/drivers/clk/rockchip/clk-rk3588.c +index 6994165e03957c..d8ffcaefa480b3 100644 +--- a/drivers/clk/rockchip/clk-rk3588.c ++++ b/drivers/clk/rockchip/clk-rk3588.c +@@ -526,7 +526,7 @@ PNAME(pmu_200m_100m_p) = { "clk_pmu1_200m_src", "clk_pmu1_100m_src" }; + PNAME(pmu_300m_24m_p) = { "clk_300m_src", "xin24m" }; + PNAME(pmu_400m_24m_p) = { "clk_400m_src", "xin24m" }; + PNAME(pmu_100m_50m_24m_src_p) = { "clk_pmu1_100m_src", "clk_pmu1_50m_src", "xin24m" }; +-PNAME(pmu_24m_32k_100m_src_p) = { "xin24m", "32k", "clk_pmu1_100m_src" }; ++PNAME(pmu_24m_32k_100m_src_p) = { "xin24m", "xin32k", "clk_pmu1_100m_src" }; + PNAME(hclk_pmu1_root_p) = { "clk_pmu1_200m_src", "clk_pmu1_100m_src", "clk_pmu1_50m_src", "xin24m" }; + PNAME(hclk_pmu_cm0_root_p) = { "clk_pmu1_400m_src", "clk_pmu1_200m_src", "clk_pmu1_100m_src", "xin24m" }; + PNAME(mclk_pdm0_p) = { "clk_pmu1_300m_src", "clk_pmu1_200m_src" }; +diff --git a/drivers/clk/starfive/clk-starfive-jh7110-vout.c b/drivers/clk/starfive/clk-starfive-jh7110-vout.c +index 10cc1ec4392517..36340ca42cc7ed 100644 +--- a/drivers/clk/starfive/clk-starfive-jh7110-vout.c ++++ b/drivers/clk/starfive/clk-starfive-jh7110-vout.c +@@ -145,7 +145,7 @@ static int jh7110_voutcrg_probe(struct platform_device *pdev) + + /* enable power domain and clocks */ + pm_runtime_enable(priv->dev); +- ret = pm_runtime_get_sync(priv->dev); ++ ret = pm_runtime_resume_and_get(priv->dev); + if (ret < 0) + return dev_err_probe(priv->dev, ret, "failed to turn on power\n"); + +diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c +index d964e3affd42ce..0eab7f3e2eab9e 100644 +--- a/drivers/clk/ti/clk-dra7-atl.c ++++ b/drivers/clk/ti/clk-dra7-atl.c +@@ -240,6 +240,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev) + } + + clk = of_clk_get_from_provider(&clkspec); ++ of_node_put(clkspec.np); + if (IS_ERR(clk)) { + pr_err("%s: failed to get atl clock %d from provider\n", + __func__, i); +diff --git a/drivers/clocksource/timer-qcom.c b/drivers/clocksource/timer-qcom.c +index b4afe3a6758351..eac4c95c6127f2 100644 +--- a/drivers/clocksource/timer-qcom.c ++++ b/drivers/clocksource/timer-qcom.c +@@ -233,6 +233,7 @@ static int __init msm_dt_timer_init(struct device_node *np) + } + + if (of_property_read_u32(np, "clock-frequency", &freq)) { ++ iounmap(cpu0_base); + pr_err("Unknown frequency\n"); + return -EINVAL; + } +@@ -243,7 +244,11 @@ static int __init msm_dt_timer_init(struct device_node *np) + freq /= 4; + writel_relaxed(DGT_CLK_CTL_DIV_4, source_base + DGT_CLK_CTL); + +- return msm_timer_init(freq, 32, irq, !!percpu_offset); ++ ret = msm_timer_init(freq, 32, irq, !!percpu_offset); ++ if (ret) ++ iounmap(cpu0_base); ++ ++ return ret; + } + TIMER_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init); + TIMER_OF_DECLARE(scss_timer, 
"qcom,scss-timer", msm_dt_timer_init); +diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c +index d88ee87b1cd6f3..cb5d1c8fefeb44 100644 +--- a/drivers/cpufreq/ti-cpufreq.c ++++ b/drivers/cpufreq/ti-cpufreq.c +@@ -61,6 +61,9 @@ struct ti_cpufreq_soc_data { + unsigned long efuse_shift; + unsigned long rev_offset; + bool multi_regulator; ++/* Backward compatibility hack: Might have missing syscon */ ++#define TI_QUIRK_SYSCON_MAY_BE_MISSING 0x1 ++ u8 quirks; + }; + + struct ti_cpufreq_data { +@@ -182,6 +185,7 @@ static struct ti_cpufreq_soc_data omap34xx_soc_data = { + .efuse_mask = BIT(3), + .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE, + .multi_regulator = false, ++ .quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING, + }; + + /* +@@ -209,6 +213,7 @@ static struct ti_cpufreq_soc_data omap36xx_soc_data = { + .efuse_mask = BIT(9), + .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE, + .multi_regulator = true, ++ .quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING, + }; + + /* +@@ -223,6 +228,7 @@ static struct ti_cpufreq_soc_data am3517_soc_data = { + .efuse_mask = 0, + .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE, + .multi_regulator = false, ++ .quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING, + }; + + static struct ti_cpufreq_soc_data am625_soc_data = { +@@ -250,7 +256,7 @@ static int ti_cpufreq_get_efuse(struct ti_cpufreq_data *opp_data, + + ret = regmap_read(opp_data->syscon, opp_data->soc_data->efuse_offset, + &efuse); +- if (ret == -EIO) { ++ if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_MAY_BE_MISSING && ret == -EIO) { + /* not a syscon register! */ + void __iomem *regs = ioremap(OMAP3_SYSCON_BASE + + opp_data->soc_data->efuse_offset, 4); +@@ -291,7 +297,7 @@ static int ti_cpufreq_get_rev(struct ti_cpufreq_data *opp_data, + + ret = regmap_read(opp_data->syscon, opp_data->soc_data->rev_offset, + &revision); +- if (ret == -EIO) { ++ if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_MAY_BE_MISSING && ret == -EIO) { + /* not a syscon register! */ + void __iomem *regs = ioremap(OMAP3_SYSCON_BASE + + opp_data->soc_data->rev_offset, 4); +diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c +index e8094fc92491eb..c0fe92409175a4 100644 +--- a/drivers/cpuidle/cpuidle-riscv-sbi.c ++++ b/drivers/cpuidle/cpuidle-riscv-sbi.c +@@ -8,6 +8,7 @@ + + #define pr_fmt(fmt) "cpuidle-riscv-sbi: " fmt + ++#include + #include + #include + #include +@@ -267,19 +268,16 @@ static int sbi_cpuidle_dt_init_states(struct device *dev, + { + struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu); + struct device_node *state_node; +- struct device_node *cpu_node; + u32 *states; + int i, ret; + +- cpu_node = of_cpu_device_node_get(cpu); ++ struct device_node *cpu_node __free(device_node) = of_cpu_device_node_get(cpu); + if (!cpu_node) + return -ENODEV; + + states = devm_kcalloc(dev, state_count, sizeof(*states), GFP_KERNEL); +- if (!states) { +- ret = -ENOMEM; +- goto fail; +- } ++ if (!states) ++ return -ENOMEM; + + /* Parse SBI specific details from state DT nodes */ + for (i = 1; i < state_count; i++) { +@@ -295,10 +293,8 @@ static int sbi_cpuidle_dt_init_states(struct device *dev, + + pr_debug("sbi-state %#x index %d\n", states[i], i); + } +- if (i != state_count) { +- ret = -ENODEV; +- goto fail; +- } ++ if (i != state_count) ++ return -ENODEV; + + /* Initialize optional data, used for the hierarchical topology. 
*/ + ret = sbi_dt_cpu_init_topology(drv, data, state_count, cpu); +@@ -308,10 +304,7 @@ static int sbi_cpuidle_dt_init_states(struct device *dev, + /* Store states in the per-cpu struct. */ + data->states = states; + +-fail: +- of_node_put(cpu_node); +- +- return ret; ++ return 0; + } + + static void sbi_cpuidle_deinit_cpu(int cpu) +diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c +index 290c8500c247f9..65785dc5b73b2b 100644 +--- a/drivers/crypto/caam/caamhash.c ++++ b/drivers/crypto/caam/caamhash.c +@@ -708,6 +708,7 @@ static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req, + GFP_KERNEL : GFP_ATOMIC; + struct ahash_edesc *edesc; + ++ sg_num = pad_sg_nents(sg_num); + edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags); + if (!edesc) + return NULL; +diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c +index 17fb01853dbf40..07e6f782b62252 100644 +--- a/drivers/crypto/ccp/sev-dev.c ++++ b/drivers/crypto/ccp/sev-dev.c +@@ -1367,6 +1367,8 @@ void sev_pci_init(void) + return; + + err: ++ sev_dev_destroy(psp_master); ++ + psp_master->sev_data = NULL; + } + +diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c +index b97ce0ee71406d..3463f5ee83c0df 100644 +--- a/drivers/crypto/hisilicon/hpre/hpre_main.c ++++ b/drivers/crypto/hisilicon/hpre/hpre_main.c +@@ -13,9 +13,7 @@ + #include + #include "hpre.h" + +-#define HPRE_QM_ABNML_INT_MASK 0x100004 + #define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0) +-#define HPRE_COMM_CNT_CLR_CE 0x0 + #define HPRE_CTRL_CNT_CLR_CE 0x301000 + #define HPRE_FSM_MAX_CNT 0x301008 + #define HPRE_VFG_AXQOS 0x30100c +@@ -42,7 +40,6 @@ + #define HPRE_HAC_INT_SET 0x301500 + #define HPRE_RNG_TIMEOUT_NUM 0x301A34 + #define HPRE_CORE_INT_ENABLE 0 +-#define HPRE_CORE_INT_DISABLE GENMASK(21, 0) + #define HPRE_RDCHN_INI_ST 0x301a00 + #define HPRE_CLSTR_BASE 0x302000 + #define HPRE_CORE_EN_OFFSET 0x04 +@@ -66,7 +63,6 @@ + #define HPRE_CLSTR_ADDR_INTRVL 0x1000 + #define HPRE_CLUSTER_INQURY 0x100 + #define HPRE_CLSTR_ADDR_INQRY_RSLT 0x104 +-#define HPRE_TIMEOUT_ABNML_BIT 6 + #define HPRE_PASID_EN_BIT 9 + #define HPRE_REG_RD_INTVRL_US 10 + #define HPRE_REG_RD_TMOUT_US 1000 +@@ -202,9 +198,9 @@ static const struct hisi_qm_cap_info hpre_basic_info[] = { + {HPRE_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC37, 0x6C37}, + {HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C37}, + {HPRE_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8}, +- {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFFFE}, +- {HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFFFE}, +- {HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFFFE}, ++ {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFC3E}, ++ {HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFC3E}, ++ {HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFC3E}, + {HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1}, + {HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1}, + {HPRE_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x0, 0x2, 0x2}, +@@ -357,6 +353,8 @@ static struct dfx_diff_registers hpre_diff_regs[] = { + }, + }; + ++static const struct hisi_qm_err_ini hpre_err_ini; ++ + bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg) + { + u32 cap_val; +@@ -653,11 +651,6 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) + writel(HPRE_QM_USR_CFG_MASK, qm->io_base + 
QM_AWUSER_M_CFG_ENABLE); + writel_relaxed(HPRE_QM_AXI_CFG_MASK, qm->io_base + QM_AXI_M_CFG); + +- /* HPRE need more time, we close this interrupt */ +- val = readl_relaxed(qm->io_base + HPRE_QM_ABNML_INT_MASK); +- val |= BIT(HPRE_TIMEOUT_ABNML_BIT); +- writel_relaxed(val, qm->io_base + HPRE_QM_ABNML_INT_MASK); +- + if (qm->ver >= QM_HW_V3) + writel(HPRE_RSA_ENB | HPRE_ECC_ENB, + qm->io_base + HPRE_TYPES_ENB); +@@ -666,9 +659,7 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) + + writel(HPRE_QM_VFG_AX_MASK, qm->io_base + HPRE_VFG_AXCACHE); + writel(0x0, qm->io_base + HPRE_BD_ENDIAN); +- writel(0x0, qm->io_base + HPRE_INT_MASK); + writel(0x0, qm->io_base + HPRE_POISON_BYPASS); +- writel(0x0, qm->io_base + HPRE_COMM_CNT_CLR_CE); + writel(0x0, qm->io_base + HPRE_ECC_BYPASS); + + writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_ARUSR_CFG); +@@ -758,7 +749,7 @@ static void hpre_hw_error_disable(struct hisi_qm *qm) + + static void hpre_hw_error_enable(struct hisi_qm *qm) + { +- u32 ce, nfe; ++ u32 ce, nfe, err_en; + + ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver); + nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver); +@@ -775,7 +766,8 @@ static void hpre_hw_error_enable(struct hisi_qm *qm) + hpre_master_ooo_ctrl(qm, true); + + /* enable hpre hw error interrupts */ +- writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK); ++ err_en = ce | nfe | HPRE_HAC_RAS_FE_ENABLE; ++ writel(~err_en, qm->io_base + HPRE_INT_MASK); + } + + static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file) +@@ -1161,6 +1153,7 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) + qm->qp_num = pf_q_num; + qm->debug.curr_qm_qp_num = pf_q_num; + qm->qm_list = &hpre_devices; ++ qm->err_ini = &hpre_err_ini; + if (pf_q_num_flag) + set_bit(QM_MODULE_PARAM, &qm->misc_ctl); + } +@@ -1350,8 +1343,6 @@ static int hpre_pf_probe_init(struct hpre *hpre) + + hpre_open_sva_prefetch(qm); + +- qm->err_ini = &hpre_err_ini; +- qm->err_ini->err_info_init(qm); + hisi_qm_dev_err_init(qm); + ret = hpre_show_last_regs_init(qm); + if (ret) +@@ -1380,6 +1371,18 @@ static int hpre_probe_init(struct hpre *hpre) + return 0; + } + ++static void hpre_probe_uninit(struct hisi_qm *qm) ++{ ++ if (qm->fun_type == QM_HW_VF) ++ return; ++ ++ hpre_cnt_regs_clear(qm); ++ qm->debug.curr_qm_qp_num = 0; ++ hpre_show_last_regs_uninit(qm); ++ hpre_close_sva_prefetch(qm); ++ hisi_qm_dev_err_uninit(qm); ++} ++ + static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) + { + struct hisi_qm *qm; +@@ -1405,7 +1408,7 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) + + ret = hisi_qm_start(qm); + if (ret) +- goto err_with_err_init; ++ goto err_with_probe_init; + + ret = hpre_debugfs_init(qm); + if (ret) +@@ -1442,9 +1445,8 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) + hpre_debugfs_exit(qm); + hisi_qm_stop(qm, QM_NORMAL); + +-err_with_err_init: +- hpre_show_last_regs_uninit(qm); +- hisi_qm_dev_err_uninit(qm); ++err_with_probe_init: ++ hpre_probe_uninit(qm); + + err_with_qm_init: + hisi_qm_uninit(qm); +@@ -1465,13 +1467,7 @@ static void hpre_remove(struct pci_dev *pdev) + hpre_debugfs_exit(qm); + hisi_qm_stop(qm, QM_NORMAL); + +- if (qm->fun_type == QM_HW_PF) { +- hpre_cnt_regs_clear(qm); +- qm->debug.curr_qm_qp_num = 0; +- hpre_show_last_regs_uninit(qm); +- hisi_qm_dev_err_uninit(qm); +- } +- ++ hpre_probe_uninit(qm); + hisi_qm_uninit(qm); + } + +diff --git 
a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c +index 562df5c77c636b..1b00edbbfe26a9 100644 +--- a/drivers/crypto/hisilicon/qm.c ++++ b/drivers/crypto/hisilicon/qm.c +@@ -455,6 +455,7 @@ static struct qm_typical_qos_table shaper_cbs_s[] = { + }; + + static void qm_irqs_unregister(struct hisi_qm *qm); ++static int qm_reset_device(struct hisi_qm *qm); + + static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new) + { +@@ -4105,6 +4106,28 @@ static int qm_set_vf_mse(struct hisi_qm *qm, bool set) + return -ETIMEDOUT; + } + ++static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) ++{ ++ u32 nfe_enb = 0; ++ ++ /* Kunpeng930 hardware automatically close master ooo when NFE occurs */ ++ if (qm->ver >= QM_HW_V3) ++ return; ++ ++ if (!qm->err_status.is_dev_ecc_mbit && ++ qm->err_status.is_qm_ecc_mbit && ++ qm->err_ini->close_axi_master_ooo) { ++ qm->err_ini->close_axi_master_ooo(qm); ++ } else if (qm->err_status.is_dev_ecc_mbit && ++ !qm->err_status.is_qm_ecc_mbit && ++ !qm->err_ini->close_axi_master_ooo) { ++ nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); ++ writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE, ++ qm->io_base + QM_RAS_NFE_ENABLE); ++ writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); ++ } ++} ++ + static int qm_vf_reset_prepare(struct hisi_qm *qm, + enum qm_stop_reason stop_reason) + { +@@ -4169,6 +4192,8 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm) + return ret; + } + ++ qm_dev_ecc_mbit_handle(qm); ++ + /* PF obtains the information of VF by querying the register. */ + qm_cmd_uninit(qm); + +@@ -4199,33 +4224,26 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm) + return 0; + } + +-static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) ++static int qm_master_ooo_check(struct hisi_qm *qm) + { +- u32 nfe_enb = 0; ++ u32 val; ++ int ret; + +- /* Kunpeng930 hardware automatically close master ooo when NFE occurs */ +- if (qm->ver >= QM_HW_V3) +- return; ++ /* Check the ooo register of the device before resetting the device. */ ++ writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, qm->io_base + ACC_MASTER_GLOBAL_CTRL); ++ ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, ++ val, (val == ACC_MASTER_TRANS_RETURN_RW), ++ POLL_PERIOD, POLL_TIMEOUT); ++ if (ret) ++ pci_warn(qm->pdev, "Bus lock! 
Please reset system.\n"); + +- if (!qm->err_status.is_dev_ecc_mbit && +- qm->err_status.is_qm_ecc_mbit && +- qm->err_ini->close_axi_master_ooo) { +- qm->err_ini->close_axi_master_ooo(qm); +- } else if (qm->err_status.is_dev_ecc_mbit && +- !qm->err_status.is_qm_ecc_mbit && +- !qm->err_ini->close_axi_master_ooo) { +- nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); +- writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE, +- qm->io_base + QM_RAS_NFE_ENABLE); +- writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); +- } ++ return ret; + } + +-static int qm_soft_reset(struct hisi_qm *qm) ++static int qm_soft_reset_prepare(struct hisi_qm *qm) + { + struct pci_dev *pdev = qm->pdev; + int ret; +- u32 val; + + /* Ensure all doorbells and mailboxes received by QM */ + ret = qm_check_req_recv(qm); +@@ -4246,30 +4264,23 @@ static int qm_soft_reset(struct hisi_qm *qm) + return ret; + } + +- qm_dev_ecc_mbit_handle(qm); +- +- /* OOO register set and check */ +- writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, +- qm->io_base + ACC_MASTER_GLOBAL_CTRL); +- +- /* If bus lock, reset chip */ +- ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, +- val, +- (val == ACC_MASTER_TRANS_RETURN_RW), +- POLL_PERIOD, POLL_TIMEOUT); +- if (ret) { +- pci_emerg(pdev, "Bus lock! Please reset system.\n"); ++ ret = qm_master_ooo_check(qm); ++ if (ret) + return ret; +- } + + if (qm->err_ini->close_sva_prefetch) + qm->err_ini->close_sva_prefetch(qm); + + ret = qm_set_pf_mse(qm, false); +- if (ret) { ++ if (ret) + pci_err(pdev, "Fails to disable pf MSE bit.\n"); +- return ret; +- } ++ ++ return ret; ++} ++ ++static int qm_reset_device(struct hisi_qm *qm) ++{ ++ struct pci_dev *pdev = qm->pdev; + + /* The reset related sub-control registers are not in PCI BAR */ + if (ACPI_HANDLE(&pdev->dev)) { +@@ -4288,12 +4299,23 @@ static int qm_soft_reset(struct hisi_qm *qm) + pci_err(pdev, "Reset step %llu failed!\n", value); + return -EIO; + } +- } else { +- pci_err(pdev, "No reset method!\n"); +- return -EINVAL; ++ ++ return 0; + } + +- return 0; ++ pci_err(pdev, "No reset method!\n"); ++ return -EINVAL; ++} ++ ++static int qm_soft_reset(struct hisi_qm *qm) ++{ ++ int ret; ++ ++ ret = qm_soft_reset_prepare(qm); ++ if (ret) ++ return ret; ++ ++ return qm_reset_device(qm); + } + + static int qm_vf_reset_done(struct hisi_qm *qm) +@@ -5261,6 +5283,35 @@ static int qm_get_pci_res(struct hisi_qm *qm) + return ret; + } + ++static int qm_clear_device(struct hisi_qm *qm) ++{ ++ acpi_handle handle = ACPI_HANDLE(&qm->pdev->dev); ++ int ret; ++ ++ if (qm->fun_type == QM_HW_VF) ++ return 0; ++ ++ /* Device does not support reset, return */ ++ if (!qm->err_ini->err_info_init) ++ return 0; ++ qm->err_ini->err_info_init(qm); ++ ++ if (!handle) ++ return 0; ++ ++ /* No reset method, return */ ++ if (!acpi_has_method(handle, qm->err_info.acpi_rst)) ++ return 0; ++ ++ ret = qm_master_ooo_check(qm); ++ if (ret) { ++ writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL); ++ return ret; ++ } ++ ++ return qm_reset_device(qm); ++} ++ + static int hisi_qm_pci_init(struct hisi_qm *qm) + { + struct pci_dev *pdev = qm->pdev; +@@ -5290,8 +5341,14 @@ static int hisi_qm_pci_init(struct hisi_qm *qm) + goto err_get_pci_res; + } + ++ ret = qm_clear_device(qm); ++ if (ret) ++ goto err_free_vectors; ++ + return 0; + ++err_free_vectors: ++ pci_free_irq_vectors(pdev); + err_get_pci_res: + qm_put_pci_res(qm); + err_disable_pcidev: +@@ -5557,7 +5614,6 @@ static int qm_prepare_for_suspend(struct hisi_qm *qm) + { + struct pci_dev *pdev = qm->pdev; + int ret; +- u32 val; + + ret = 
qm->ops->set_msi(qm, false); + if (ret) { +@@ -5565,18 +5621,9 @@ static int qm_prepare_for_suspend(struct hisi_qm *qm) + return ret; + } + +- /* shutdown OOO register */ +- writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, +- qm->io_base + ACC_MASTER_GLOBAL_CTRL); +- +- ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, +- val, +- (val == ACC_MASTER_TRANS_RETURN_RW), +- POLL_PERIOD, POLL_TIMEOUT); +- if (ret) { +- pci_emerg(pdev, "Bus lock! Please reset system.\n"); ++ ret = qm_master_ooo_check(qm); ++ if (ret) + return ret; +- } + + ret = qm_set_pf_mse(qm, false); + if (ret) +diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c +index bf02a6b2eed412..cf7b6a37e7df7a 100644 +--- a/drivers/crypto/hisilicon/sec2/sec_main.c ++++ b/drivers/crypto/hisilicon/sec2/sec_main.c +@@ -1061,9 +1061,6 @@ static int sec_pf_probe_init(struct sec_dev *sec) + struct hisi_qm *qm = &sec->qm; + int ret; + +- qm->err_ini = &sec_err_ini; +- qm->err_ini->err_info_init(qm); +- + ret = sec_set_user_domain_and_cache(qm); + if (ret) + return ret; +@@ -1118,6 +1115,7 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) + qm->qp_num = pf_q_num; + qm->debug.curr_qm_qp_num = pf_q_num; + qm->qm_list = &sec_devices; ++ qm->err_ini = &sec_err_ini; + if (pf_q_num_flag) + set_bit(QM_MODULE_PARAM, &qm->misc_ctl); + } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) { +@@ -1182,6 +1180,12 @@ static int sec_probe_init(struct sec_dev *sec) + + static void sec_probe_uninit(struct hisi_qm *qm) + { ++ if (qm->fun_type == QM_HW_VF) ++ return; ++ ++ sec_debug_regs_clear(qm); ++ sec_show_last_regs_uninit(qm); ++ sec_close_sva_prefetch(qm); + hisi_qm_dev_err_uninit(qm); + } + +@@ -1274,7 +1278,6 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id) + sec_debugfs_exit(qm); + hisi_qm_stop(qm, QM_NORMAL); + err_probe_uninit: +- sec_show_last_regs_uninit(qm); + sec_probe_uninit(qm); + err_qm_uninit: + sec_qm_uninit(qm); +@@ -1296,11 +1299,6 @@ static void sec_remove(struct pci_dev *pdev) + sec_debugfs_exit(qm); + + (void)hisi_qm_stop(qm, QM_NORMAL); +- +- if (qm->fun_type == QM_HW_PF) +- sec_debug_regs_clear(qm); +- sec_show_last_regs_uninit(qm); +- + sec_probe_uninit(qm); + + sec_qm_uninit(qm); +diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c +index cd7ecb2180bf15..9d47b3675da7d4 100644 +--- a/drivers/crypto/hisilicon/zip/zip_main.c ++++ b/drivers/crypto/hisilicon/zip/zip_main.c +@@ -1150,8 +1150,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) + + hisi_zip->ctrl = ctrl; + ctrl->hisi_zip = hisi_zip; +- qm->err_ini = &hisi_zip_err_ini; +- qm->err_ini->err_info_init(qm); + + ret = hisi_zip_set_user_domain_and_cache(qm); + if (ret) +@@ -1212,6 +1210,7 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) + qm->qp_num = pf_q_num; + qm->debug.curr_qm_qp_num = pf_q_num; + qm->qm_list = &zip_devices; ++ qm->err_ini = &hisi_zip_err_ini; + if (pf_q_num_flag) + set_bit(QM_MODULE_PARAM, &qm->misc_ctl); + } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) { +@@ -1278,6 +1277,16 @@ static int hisi_zip_probe_init(struct hisi_zip *hisi_zip) + return 0; + } + ++static void hisi_zip_probe_uninit(struct hisi_qm *qm) ++{ ++ if (qm->fun_type == QM_HW_VF) ++ return; ++ ++ hisi_zip_show_last_regs_uninit(qm); ++ hisi_zip_close_sva_prefetch(qm); ++ hisi_qm_dev_err_uninit(qm); ++} ++ + static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) + { + 
struct hisi_zip *hisi_zip; +@@ -1304,7 +1313,7 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) + + ret = hisi_qm_start(qm); + if (ret) +- goto err_dev_err_uninit; ++ goto err_probe_uninit; + + ret = hisi_zip_debugfs_init(qm); + if (ret) +@@ -1341,9 +1350,8 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) + hisi_zip_debugfs_exit(qm); + hisi_qm_stop(qm, QM_NORMAL); + +-err_dev_err_uninit: +- hisi_zip_show_last_regs_uninit(qm); +- hisi_qm_dev_err_uninit(qm); ++err_probe_uninit: ++ hisi_zip_probe_uninit(qm); + + err_qm_uninit: + hisi_zip_qm_uninit(qm); +@@ -1364,8 +1372,7 @@ static void hisi_zip_remove(struct pci_dev *pdev) + + hisi_zip_debugfs_exit(qm); + hisi_qm_stop(qm, QM_NORMAL); +- hisi_zip_show_last_regs_uninit(qm); +- hisi_qm_dev_err_uninit(qm); ++ hisi_zip_probe_uninit(qm); + hisi_zip_qm_uninit(qm); + } + +diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c +index c963cd9e88d16c..6edfd054667373 100644 +--- a/drivers/cxl/core/pci.c ++++ b/drivers/cxl/core/pci.c +@@ -388,10 +388,6 @@ int cxl_dvsec_rr_decode(struct device *dev, int d, + + size |= temp & CXL_DVSEC_MEM_SIZE_LOW_MASK; + if (!size) { +- info->dvsec_range[i] = (struct range) { +- .start = 0, +- .end = CXL_RESOURCE_NONE, +- }; + continue; + } + +@@ -409,12 +405,10 @@ int cxl_dvsec_rr_decode(struct device *dev, int d, + + base |= temp & CXL_DVSEC_MEM_BASE_LOW_MASK; + +- info->dvsec_range[i] = (struct range) { ++ info->dvsec_range[ranges++] = (struct range) { + .start = base, + .end = base + size - 1 + }; +- +- ranges++; + } + + info->ranges = ranges; +diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c +index a2984e9bed3a77..a0edb61a5a01ac 100644 +--- a/drivers/edac/igen6_edac.c ++++ b/drivers/edac/igen6_edac.c +@@ -245,7 +245,7 @@ static u64 ehl_err_addr_to_imc_addr(u64 eaddr, int mc) + if (igen6_tom <= _4GB) + return eaddr + igen6_tolud - _4GB; + +- if (eaddr < _4GB) ++ if (eaddr >= igen6_tom) + return eaddr + igen6_tolud - igen6_tom; + + return eaddr; +diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c +index c4fc64cbecd0e6..6ddc90d7ba7c2a 100644 +--- a/drivers/edac/synopsys_edac.c ++++ b/drivers/edac/synopsys_edac.c +@@ -9,6 +9,8 @@ + #include + #include + #include ++#include ++#include + #include + #include + +@@ -299,6 +301,7 @@ struct synps_ecc_status { + /** + * struct synps_edac_priv - DDR memory controller private instance data. + * @baseaddr: Base address of the DDR controller. ++ * @reglock: Concurrent CSRs access lock. + * @message: Buffer for framing the event specific info. + * @stat: ECC status information. + * @p_data: Platform data. +@@ -313,6 +316,7 @@ struct synps_ecc_status { + */ + struct synps_edac_priv { + void __iomem *baseaddr; ++ spinlock_t reglock; + char message[SYNPS_EDAC_MSG_SIZE]; + struct synps_ecc_status stat; + const struct synps_platform_data *p_data; +@@ -334,6 +338,7 @@ struct synps_edac_priv { + * @get_mtype: Get mtype. + * @get_dtype: Get dtype. + * @get_ecc_state: Get ECC state. ++ * @get_mem_info: Get EDAC memory info + * @quirks: To differentiate IPs. 
+ */
+ struct synps_platform_data {
+@@ -341,6 +346,9 @@ struct synps_platform_data {
+ enum mem_type (*get_mtype)(const void __iomem *base);
+ enum dev_type (*get_dtype)(const void __iomem *base);
+ bool (*get_ecc_state)(void __iomem *base);
++#ifdef CONFIG_EDAC_DEBUG
++ u64 (*get_mem_info)(struct synps_edac_priv *priv);
++#endif
+ int quirks;
+ };
+
+@@ -399,6 +407,25 @@ static int zynq_get_error_info(struct synps_edac_priv *priv)
+ return 0;
+ }
+
++#ifdef CONFIG_EDAC_DEBUG
++/**
++ * zynqmp_get_mem_info - Get the current memory info.
++ * @priv: DDR memory controller private instance data.
++ *
++ * Return: host interface address.
++ */
++static u64 zynqmp_get_mem_info(struct synps_edac_priv *priv)
++{
++ u64 hif_addr = 0, linear_addr;
++
++ linear_addr = priv->poison_addr;
++ if (linear_addr >= SZ_32G)
++ linear_addr = linear_addr - SZ_32G + SZ_2G;
++ hif_addr = linear_addr >> 3;
++ return hif_addr;
++}
++#endif
++
+ /**
+ * zynqmp_get_error_info - Get the current ECC error info.
+ * @priv: DDR memory controller private instance data.
+@@ -408,7 +435,8 @@ static int zynq_get_error_info(struct synps_edac_priv *priv)
+ static int zynqmp_get_error_info(struct synps_edac_priv *priv)
+ {
+ struct synps_ecc_status *p;
+- u32 regval, clearval = 0;
++ u32 regval, clearval;
++ unsigned long flags;
+ void __iomem *base;
+
+ base = priv->baseaddr;
+@@ -452,10 +480,14 @@ static int zynqmp_get_error_info(struct synps_edac_priv *priv)
+ p->ueinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
+ p->ueinfo.data = readl(base + ECC_UESYND0_OFST);
+ out:
+- clearval = ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT;
+- clearval |= ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
++ spin_lock_irqsave(&priv->reglock, flags);
++
++ clearval = readl(base + ECC_CLR_OFST) |
++ ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT |
++ ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
+ writel(clearval, base + ECC_CLR_OFST);
+- writel(0x0, base + ECC_CLR_OFST);
++
++ spin_unlock_irqrestore(&priv->reglock, flags);
+
+ return 0;
+ }
+@@ -515,24 +547,41 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
+
+ static void enable_intr(struct synps_edac_priv *priv)
+ {
++ unsigned long flags;
++
+ /* Enable UE/CE Interrupts */
+- if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
+- writel(DDR_UE_MASK | DDR_CE_MASK,
+- priv->baseaddr + ECC_CLR_OFST);
+- else
++ if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
+ writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
+ priv->baseaddr + DDR_QOS_IRQ_EN_OFST);
+
++ return;
++ }
++
++ spin_lock_irqsave(&priv->reglock, flags);
++
++ writel(DDR_UE_MASK | DDR_CE_MASK,
++ priv->baseaddr + ECC_CLR_OFST);
++
++ spin_unlock_irqrestore(&priv->reglock, flags);
+ }
+
+ static void disable_intr(struct synps_edac_priv *priv)
+ {
++ unsigned long flags;
++
+ /* Disable UE/CE Interrupts */
+- if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
+- writel(0x0, priv->baseaddr + ECC_CLR_OFST);
+- else
++ if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
+ writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
+ priv->baseaddr + DDR_QOS_IRQ_DB_OFST);
++
++ return;
++ }
++
++ spin_lock_irqsave(&priv->reglock, flags);
++
++ writel(0, priv->baseaddr + ECC_CLR_OFST);
++
++ spin_unlock_irqrestore(&priv->reglock, flags);
+ }
+
+ /**
+@@ -576,8 +625,6 @@ static irqreturn_t intr_handler(int irq, void *dev_id)
+ /* v3.0 of the controller does not have this register */
+ if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR))
+ writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
+- else
+- enable_intr(priv);
+
+ return IRQ_HANDLED;
+ }
+@@ -899,6 +946,9 @@ static const struct synps_platform_data zynqmp_edac_def = {
+ .get_mtype = zynqmp_get_mtype,
+ .get_dtype = zynqmp_get_dtype,
+ .get_ecc_state = zynqmp_get_ecc_state,
++#ifdef CONFIG_EDAC_DEBUG
++ .get_mem_info = zynqmp_get_mem_info,
++#endif
+ .quirks = (DDR_ECC_INTR_SUPPORT
+#ifdef CONFIG_EDAC_DEBUG
+ | DDR_ECC_DATA_POISON_SUPPORT
+#endif
+@@ -952,10 +1002,16 @@ MODULE_DEVICE_TABLE(of, synps_edac_match);
+ static void ddr_poison_setup(struct synps_edac_priv *priv)
+ {
+ int col = 0, row = 0, bank = 0, bankgrp = 0, rank = 0, regval;
++ const struct synps_platform_data *p_data;
+ int index;
+ ulong hif_addr = 0;
+
+- hif_addr = priv->poison_addr >> 3;
++ p_data = priv->p_data;
++
++ if (p_data->get_mem_info)
++ hif_addr = p_data->get_mem_info(priv);
++ else
++ hif_addr = priv->poison_addr >> 3;
+
+ for (index = 0; index < DDR_MAX_ROW_SHIFT; index++) {
+ if (priv->row_shift[index])
+@@ -1359,6 +1415,7 @@ static int mc_probe(struct platform_device *pdev)
+ priv = mci->pvt_info;
+ priv->baseaddr = baseaddr;
+ priv->p_data = p_data;
++ spin_lock_init(&priv->reglock);
+
+ mc_init(mci, pdev);
+
+diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
+index 6274b86eb94377..73cc2f2dcbf923 100644
+--- a/drivers/firewire/core-cdev.c
++++ b/drivers/firewire/core-cdev.c
+@@ -598,11 +598,11 @@ static void complete_transaction(struct fw_card *card, int rcode, u32 request_ts
+ queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, NULL, 0);
+
+ break;
++ }
+ default:
+ WARN_ON(1);
+ break;
+ }
+- }
+
+ /* Drop the idr's reference */
+ client_put(client);
+diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c
+index e123de6e8c67a9..aa02392265d326 100644
+--- a/drivers/firmware/arm_scmi/optee.c
++++ b/drivers/firmware/arm_scmi/optee.c
+@@ -467,6 +467,13 @@ static int scmi_optee_chan_free(int id, void *p, void *data)
+ struct scmi_chan_info *cinfo = p;
+ struct scmi_optee_channel *channel = cinfo->transport_info;
+
++ /*
++ * Different protocols might share the same chan info, so a previous
++ * call might have already freed the structure.
++ */
++ if (!channel)
++ return 0;
++
+ mutex_lock(&scmi_optee_private->mu);
+ list_del(&channel->link);
+ mutex_unlock(&scmi_optee_private->mu);
+diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
+index 7acbac16eae0b2..95da291c3083ef 100644
+--- a/drivers/firmware/efi/libstub/tpm.c
++++ b/drivers/firmware/efi/libstub/tpm.c
+@@ -115,7 +115,7 @@ void efi_retrieve_tpm2_eventlog(void)
+ }
+
+ /* Allocate space for the logs and copy them. */
+- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
++ status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY,
+ sizeof(*log_tbl) + log_size, (void **)&log_tbl);
+
+ if (status != EFI_SUCCESS) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+index 104a5ad8397da7..198687545407e9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+@@ -209,7 +209,7 @@ struct amd_sriov_msg_pf2vf_info {
+ uint32_t pcie_atomic_ops_support_flags;
+ /* reserved */
+ uint32_t reserved[256 - AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE];
+-};
++} __packed;
+
+ struct amd_sriov_msg_vf2pf_info_header {
+ /* the total structure size in byte */
+@@ -267,7 +267,7 @@ struct amd_sriov_msg_vf2pf_info {
+
+ /* reserved */
+ uint32_t reserved[256 - AMD_SRIOV_MSG_VF2PF_INFO_FILLED_SIZE];
+-};
++} __packed;
+
+ /* mailbox message send from guest to host */
+ enum amd_sriov_mailbox_request_message {
+diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+index d95b2dc7806341..157e898dc3820d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
++++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+@@ -2065,26 +2065,29 @@ amdgpu_atombios_encoder_get_lcd_info(struct amdgpu_encoder *encoder)
+ fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record;
+ if (fake_edid_record->ucFakeEDIDLength) {
+ struct edid *edid;
+- int edid_size =
+- max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength);
+- edid = kmalloc(edid_size, GFP_KERNEL);
++ int edid_size;
++
++ if (fake_edid_record->ucFakeEDIDLength == 128)
++ edid_size = fake_edid_record->ucFakeEDIDLength;
++ else
++ edid_size = fake_edid_record->ucFakeEDIDLength * 128;
++ edid = kmemdup(&fake_edid_record->ucFakeEDIDString[0],
++ edid_size, GFP_KERNEL);
+ if (edid) {
+- memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
+- fake_edid_record->ucFakeEDIDLength);
+-
+ if (drm_edid_is_valid(edid)) {
+ adev->mode_info.bios_hardcoded_edid = edid;
+ adev->mode_info.bios_hardcoded_edid_size = edid_size;
+- } else
++ } else {
+ kfree(edid);
++ }
+ }
++ record += struct_size(fake_edid_record,
++ ucFakeEDIDString,
++ edid_size);
++ } else {
++ /* empty fake edid record must be 3 bytes long */
++ record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1;
+ }
+- record += fake_edid_record->ucFakeEDIDLength ?
+- struct_size(fake_edid_record,
+- ucFakeEDIDString,
+- fake_edid_record->ucFakeEDIDLength) :
+- /* empty fake edid record must be 3 bytes long */
+- sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1;
+ break;
+ case LCD_PANEL_RESOLUTION_RECORD_TYPE:
+ panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index f0ebf686b06f22..7731ce7762b77d 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4037,6 +4037,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
+
+ #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
+ #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
++#define AMDGPU_DM_MIN_SPREAD ((AMDGPU_DM_DEFAULT_MAX_BACKLIGHT - AMDGPU_DM_DEFAULT_MIN_BACKLIGHT) / 2)
+ #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
+
+ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
+@@ -4051,6 +4052,21 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
+ return;
+
+ amdgpu_acpi_get_backlight_caps(&caps);
++
++ /* validate the firmware value is sane */
++ if (caps.caps_valid) {
++ int spread = caps.max_input_signal - caps.min_input_signal;
++
++ if (caps.max_input_signal > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT ||
++ caps.min_input_signal < AMDGPU_DM_DEFAULT_MIN_BACKLIGHT ||
++ spread > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT ||
++ spread < AMDGPU_DM_MIN_SPREAD) {
++ DRM_DEBUG_KMS("DM: Invalid backlight caps: min=%d, max=%d\n",
++ caps.min_input_signal, caps.max_input_signal);
++ caps.caps_valid = false;
++ }
++ }
++
+ if (caps.caps_valid) {
+ dm->backlight_caps[bl_idx].caps_valid = true;
+ if (caps.aux_support)
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index 3880ddf1c820fd..7fb11445a28fa5 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -246,7 +246,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
+ aconnector->dsc_aux = &aconnector->mst_root->dm_dp_aux.aux;
+
+ /* synaptics cascaded MST hub case */
+- if (!aconnector->dsc_aux && is_synaptics_cascaded_panamera(aconnector->dc_link, port))
++ if (is_synaptics_cascaded_panamera(aconnector->dc_link, port))
+ aconnector->dsc_aux = port->mgr->aux;
+
+ if (!aconnector->dsc_aux)
+@@ -1115,7 +1115,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+ params[count].num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
+ params[count].bpp_overwrite = aconnector->dsc_settings.dsc_bits_per_pixel;
+ params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported;
+- dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy);
++ dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link));
+ if (!dc_dsc_compute_bandwidth_range(
+ stream->sink->ctx->dc->res_pool->dscs[0],
+ stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
+@@ -1266,6 +1266,9 @@ static bool is_dsc_need_re_compute(
+ }
+ }
+
++ if (new_stream_on_link_num == 0)
++ return false;
++
+ if (new_stream_on_link_num == 0)
+ return false;
+
+@@ -1583,7 +1586,7 @@ static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
+ {
+ struct dc_dsc_policy dsc_policy = {0};
+
+- dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy);
++ dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link));
+ dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0],
+ stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
+ dsc_policy.min_target_bpp * 16,
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+index fe3078b8789ef1..01c07545ef6b47 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+@@ -100,7 +100,8 @@ uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps(
+ */
+ void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
+ uint32_t max_target_bpp_limit_override_x16,
+- struct dc_dsc_policy *policy);
++ struct dc_dsc_policy *policy,
++ const enum dc_link_encoding_format link_encoding);
+
+ void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+index ba47a1c8eec107..d59af329d0009e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+@@ -214,7 +214,11 @@ bool dcn30_set_output_transfer_func(struct dc *dc,
+ }
+ }
+
+- mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
++ if (mpc->funcs->set_output_gamma)
++ mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
++ else
++ DC_LOG_ERROR("%s: set_output_gamma function pointer is NULL.\n", __func__);
++
+ return ret;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+index 230be292ff35b5..9edc9b0e3f082d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
++++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+@@ -861,7 +861,7 @@ static bool setup_dsc_config(
+
+ memset(dsc_cfg, 0, sizeof(struct dc_dsc_config));
+
+- dc_dsc_get_policy_for_timing(timing, options->max_target_bpp_limit_override_x16, &policy);
++ dc_dsc_get_policy_for_timing(timing, options->max_target_bpp_limit_override_x16, &policy, link_encoding);
+ pic_width = timing->h_addressable + timing->h_border_left + timing->h_border_right;
+ pic_height = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
+
+@@ -1134,7 +1134,8 @@ uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps(
+
+ void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
+ uint32_t max_target_bpp_limit_override_x16,
+- struct dc_dsc_policy *policy)
++ struct dc_dsc_policy *policy,
++ const enum dc_link_encoding_format link_encoding)
+ {
+ uint32_t bpc = 0;
+
+diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+index ef3a674090211c..803586f4267af8 100644
+--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
++++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+@@ -133,7 +133,7 @@ unsigned int mod_freesync_calc_v_total_from_refresh(
+
+ v_total = div64_u64(div64_u64(((unsigned long long)(
+ frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)),
+- stream->timing.h_total), 1000000);
++ stream->timing.h_total) + 500000, 1000000);
+
+ /* v_total cannot be less than nominal */
+ if (v_total < stream->timing.v_total) {
+diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
+index 0efcbc73f2a432..5e43a40a5d5221 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
++++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
+@@ -411,22 +411,6 @@ static const struct drm_connector_funcs lt8912_connector_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ };
+
+-static enum drm_mode_status
+-lt8912_connector_mode_valid(struct drm_connector *connector,
+- struct drm_display_mode *mode)
+-{
+- if (mode->clock > 150000)
+- return MODE_CLOCK_HIGH;
+-
+- if (mode->hdisplay > 1920)
+- return MODE_BAD_HVALUE;
+-
+- if (mode->vdisplay > 1080)
+- return MODE_BAD_VVALUE;
+-
+- return MODE_OK;
+-}
+-
+ static int lt8912_connector_get_modes(struct drm_connector *connector)
+ {
+ const struct drm_edid *drm_edid;
+@@ -452,7 +436,6 @@ static int lt8912_connector_get_modes(struct drm_connector *connector)
+
+ static const struct drm_connector_helper_funcs lt8912_connector_helper_funcs = {
+ .get_modes = lt8912_connector_get_modes,
+- .mode_valid = lt8912_connector_mode_valid,
+ };
+
+ static void lt8912_bridge_mode_set(struct drm_bridge *bridge,
+@@ -594,6 +577,23 @@ static void lt8912_bridge_detach(struct drm_bridge *bridge)
+ drm_bridge_hpd_disable(lt->hdmi_port);
+ }
+
++static enum drm_mode_status
++lt8912_bridge_mode_valid(struct drm_bridge *bridge,
++ const struct drm_display_info *info,
++ const struct drm_display_mode *mode)
++{
++ if (mode->clock > 150000)
++ return MODE_CLOCK_HIGH;
++
++ if (mode->hdisplay > 1920)
++ return MODE_BAD_HVALUE;
++
++ if (mode->vdisplay > 1080)
++ return MODE_BAD_VVALUE;
++
++ return MODE_OK;
++}
++
+ static enum drm_connector_status
+ lt8912_bridge_detect(struct drm_bridge *bridge)
+ {
+@@ -624,6 +624,7 @@ static struct edid *lt8912_bridge_get_edid(struct drm_bridge *bridge,
+ static const struct drm_bridge_funcs lt8912_bridge_funcs = {
+ .attach = lt8912_bridge_attach,
+ .detach = lt8912_bridge_detach,
++ .mode_valid = lt8912_bridge_mode_valid,
+ .mode_set = lt8912_bridge_mode_set,
+ .enable = lt8912_bridge_enable,
+ .detect = lt8912_bridge_detect,
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+index 5302bebbe38c95..1456abd5b9dde1 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+@@ -1173,7 +1173,7 @@ static int gsc_bind(struct device *dev, struct device *master, void *data)
+ struct exynos_drm_ipp *ipp = &ctx->ipp;
+
+ ctx->drm_dev = drm_dev;
+- ctx->drm_dev = drm_dev;
++ ipp->drm_dev = drm_dev;
+ exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
+
+ exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+index 29e021ec6901c0..659112da47b692 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+@@ -67,6 +67,8 @@ struct mtk_drm_crtc {
+ /* lock for display hardware access */
+ struct mutex hw_lock;
+ bool config_updating;
++ /* lock for config_updating to cmd buffer */
++ spinlock_t config_lock;
+ };
+
+ struct mtk_crtc_state {
+@@ -104,11 +106,16 @@ static void mtk_drm_crtc_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
+
+ static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
+ {
++ unsigned long flags;
++
+ drm_crtc_handle_vblank(&mtk_crtc->base);
++
++ spin_lock_irqsave(&mtk_crtc->config_lock, flags);
+ if (!mtk_crtc->config_updating && mtk_crtc->pending_needs_vblank) {
+ mtk_drm_crtc_finish_page_flip(mtk_crtc);
+ mtk_crtc->pending_needs_vblank = false;
+ }
++ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+ }
+
+ #if IS_REACHABLE(CONFIG_MTK_CMDQ)
+@@ -291,12 +298,19 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
+ struct mtk_drm_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_drm_crtc, cmdq_client);
+ struct mtk_crtc_state *state;
+ unsigned int i;
++ unsigned long flags;
+
+ if (data->sta < 0)
+ return;
+
+ state = to_mtk_crtc_state(mtk_crtc->base.state);
+
++ spin_lock_irqsave(&mtk_crtc->config_lock, flags);
++ if (mtk_crtc->config_updating) {
++ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
++ goto ddp_cmdq_cb_out;
++ }
++
+ state->pending_config = false;
+
+ if (mtk_crtc->pending_planes) {
+@@ -323,6 +337,10 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
+ mtk_crtc->pending_async_planes = false;
+ }
+
++ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
++
++ddp_cmdq_cb_out:
++
+ mtk_crtc->cmdq_vblank_cnt = 0;
+ wake_up(&mtk_crtc->cb_blocking_queue);
+ }
+@@ -432,6 +450,7 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
+ {
+ struct drm_device *drm = mtk_crtc->base.dev;
+ struct drm_crtc *crtc = &mtk_crtc->base;
++ unsigned long flags;
+ int i;
+
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
+@@ -463,10 +482,10 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
+ pm_runtime_put(drm->dev);
+
+ if (crtc->state->event && !crtc->state->active) {
+- spin_lock_irq(&crtc->dev->event_lock);
++ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+- spin_unlock_irq(&crtc->dev->event_lock);
++ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
+ }
+
+@@ -555,9 +574,14 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
+ struct mtk_drm_private *priv = crtc->dev->dev_private;
+ unsigned int pending_planes = 0, pending_async_planes = 0;
+ int i;
++ unsigned long flags;
+
+ mutex_lock(&mtk_crtc->hw_lock);
++
++ spin_lock_irqsave(&mtk_crtc->config_lock, flags);
+ mtk_crtc->config_updating = true;
++ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
++
+ if (needs_vblank)
+ mtk_crtc->pending_needs_vblank = true;
+
+@@ -611,7 +635,10 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
+ mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
+ }
+ #endif
++ spin_lock_irqsave(&mtk_crtc->config_lock, flags);
+ mtk_crtc->config_updating = false;
++ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
++
+ mutex_unlock(&mtk_crtc->hw_lock);
+ }
+
+@@ -1014,6 +1041,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
+ drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size);
+ drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size);
+ mutex_init(&mtk_crtc->hw_lock);
++ spin_lock_init(&mtk_crtc->config_lock);
+
+ #if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ i = priv->mbox_index++;
+diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+index e5916c10679679..8c2758a18a19cf 100644
+--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+@@ -65,6 +65,8 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+
+ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+ {
++ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
++ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = submit->ring;
+ struct drm_gem_object *obj;
+ uint32_t *ptr, dwords;
+@@ -109,6 +111,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
+ }
+ }
+
++ a5xx_gpu->last_seqno[ring->id] = submit->seqno;
+ a5xx_flush(gpu, ring, true);
+ a5xx_preempt_trigger(gpu);
+
+@@ -150,9 +153,13 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+- /* Enable local preemption for finegrain preemption */
++ /*
++ * Disable local preemption by default because it requires
++ * user-space to be aware of it and provide additional handling
++ * to restore rendering state or do various flushes on switch.
++ */
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
+- OUT_RING(ring, 0x1);
++ OUT_RING(ring, 0x0);
+
+ /* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
+ OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+@@ -206,6 +213,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+ /* Write the fence to the scratch register */
+ OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
+ OUT_RING(ring, submit->seqno);
++ a5xx_gpu->last_seqno[ring->id] = submit->seqno;
+
+ /*
+ * Execute a CACHE_FLUSH_TS event. This will ensure that the
+diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+index c7187bcc5e9082..9c0d701fe4b85b 100644
+--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+@@ -34,8 +34,10 @@ struct a5xx_gpu {
+ struct drm_gem_object *preempt_counters_bo[MSM_GPU_MAX_RINGS];
+ struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
+ uint64_t preempt_iova[MSM_GPU_MAX_RINGS];
++ uint32_t last_seqno[MSM_GPU_MAX_RINGS];
+
+ atomic_t preempt_state;
++ spinlock_t preempt_start_lock;
+ struct timer_list preempt_timer;
+
+ struct drm_gem_object *shadow_bo;
+diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+index f58dd564d122ba..0469fea5501083 100644
+--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
++++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+@@ -55,6 +55,8 @@ static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+ /* Return the highest priority ringbuffer with something in it */
+ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
+ {
++ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
++ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ unsigned long flags;
+ int i;
+
+@@ -64,6 +66,8 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
+
+ spin_lock_irqsave(&ring->preempt_lock, flags);
+ empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));
++ if (!empty && ring == a5xx_gpu->cur_ring)
++ empty = ring->memptrs->fence == a5xx_gpu->last_seqno[i];
+ spin_unlock_irqrestore(&ring->preempt_lock, flags);
+
+ if (!empty)
+@@ -97,12 +101,19 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
+ if (gpu->nr_rings == 1)
+ return;
+
++ /*
++ * Serialize preemption start to ensure that we always make
++ * decision on latest state. Otherwise we can get stuck in
++ * lower priority or empty ring.
++ */
++ spin_lock_irqsave(&a5xx_gpu->preempt_start_lock, flags);
++
+ /*
+ * Try to start preemption by moving from NONE to START. If
+ * unsuccessful, a preemption is already in flight
+ */
+ if (!try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START))
+- return;
++ goto out;
+
+ /* Get the next ring to preempt to */
+ ring = get_next_ring(gpu);
+@@ -127,9 +138,11 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
+ set_preempt_state(a5xx_gpu, PREEMPT_ABORT);
+ update_wptr(gpu, a5xx_gpu->cur_ring);
+ set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+- return;
++ goto out;
+ }
+
++ spin_unlock_irqrestore(&a5xx_gpu->preempt_start_lock, flags);
++
+ /* Make sure the wptr doesn't update while we're in motion */
+ spin_lock_irqsave(&ring->preempt_lock, flags);
+ a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring);
+@@ -152,6 +165,10 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
+
+ /* And actually start the preemption */
+ gpu_write(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL, 1);
++ return;
++
++out:
++ spin_unlock_irqrestore(&a5xx_gpu->preempt_start_lock, flags);
+ }
+
+ void a5xx_preempt_irq(struct msm_gpu *gpu)
+@@ -188,6 +205,12 @@ void a5xx_preempt_irq(struct msm_gpu *gpu)
+ update_wptr(gpu, a5xx_gpu->cur_ring);
+
+ set_preempt_state(a5xx_gpu, PREEMPT_NONE);
++
++ /*
++ * Try to trigger preemption again in case there was a submit or
++ * retire during ring switch
++ */
++ a5xx_preempt_trigger(gpu);
+ }
+
+ void a5xx_preempt_hw_init(struct msm_gpu *gpu)
+@@ -204,6 +227,8 @@ void a5xx_preempt_hw_init(struct msm_gpu *gpu)
+ return;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
++ a5xx_gpu->preempt[i]->data = 0;
++ a5xx_gpu->preempt[i]->info = 0;
+ a5xx_gpu->preempt[i]->wptr = 0;
+ a5xx_gpu->preempt[i]->rptr = 0;
+ a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova;
+@@ -298,5 +323,6 @@ void a5xx_preempt_init(struct msm_gpu *gpu)
+ }
+ }
+
++ spin_lock_init(&a5xx_gpu->preempt_start_lock);
+ timer_setup(&a5xx_gpu->preempt_timer, a5xx_preempt_timer, 0);
+ }
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+index 96deaf85c0cd27..4127e2762dcd10 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+@@ -468,7 +468,7 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
+ ret = request_firmware_direct(&fw, fwname, drm->dev);
+ if (!ret) {
+ DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n",
+- newname);
++ fwname);
+ adreno_gpu->fwloc = FW_LOCATION_LEGACY;
+ goto out;
+ } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
+diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
+index 56a3063545ec46..12d07e93a4c47e 100644
+--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
++++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
+@@ -356,7 +356,7 @@ void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p)
+
+ drm_printf(p, "%s:%d\t%d\t%s\n",
+ pipe2name(pipe), j, inuse,
+- plane ? plane->name : NULL);
++ plane ? plane->name : "(null)");
+
+ total += inuse;
+ }
+diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+index 89a6344bc8653d..f72ce6a3c456d5 100644
+--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+@@ -135,7 +135,7 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config
+ config->pll_clock_inverters = 0x00;
+ else
+ config->pll_clock_inverters = 0x40;
+- } else {
++ } else if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) {
+ if (pll_freq <= 1000000000ULL)
+ config->pll_clock_inverters = 0xa0;
+ else if (pll_freq <= 2500000000ULL)
+@@ -144,6 +144,16 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config
+ config->pll_clock_inverters = 0x00;
+ else
+ config->pll_clock_inverters = 0x40;
++ } else {
++ /* 4.2, 4.3 */
++ if (pll_freq <= 1000000000ULL)
++ config->pll_clock_inverters = 0xa0;
++ else if (pll_freq <= 2500000000ULL)
++ config->pll_clock_inverters = 0x20;
++ else if (pll_freq <= 3500000000ULL)
++ config->pll_clock_inverters = 0x00;
++ else
++ config->pll_clock_inverters = 0x40;
+ }
+
+ config->decimal_div_start = dec;
+diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
+index 0de79f3a7e3ffc..820c2c3641d388 100644
+--- a/drivers/gpu/drm/radeon/evergreen_cs.c
++++ b/drivers/gpu/drm/radeon/evergreen_cs.c
+@@ -395,7 +395,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
+ struct evergreen_cs_track *track = p->track;
+ struct eg_surface surf;
+ unsigned pitch, slice, mslice;
+- unsigned long offset;
++ u64 offset;
+ int r;
+
+ mslice = G_028C6C_SLICE_MAX(track->cb_color_view[id]) + 1;
+@@ -433,14 +433,14 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
+ return r;
+ }
+
+- offset = track->cb_color_bo_offset[id] << 8;
++ offset = (u64)track->cb_color_bo_offset[id] << 8;
+ if (offset & (surf.base_align - 1)) {
+- dev_warn(p->dev, "%s:%d cb[%d] bo base %ld not aligned with %ld\n",
++ dev_warn(p->dev, "%s:%d cb[%d] bo base %llu not aligned with %ld\n",
+ __func__, __LINE__, id, offset, surf.base_align);
+ return -EINVAL;
+ }
+
+- offset += surf.layer_size * mslice;
++ offset += (u64)surf.layer_size * mslice;
+ if (offset > radeon_bo_size(track->cb_color_bo[id])) {
+ /* old ddx are broken they allocate bo with w*h*bpp but
+ * program slice with ALIGN(h, 8), catch this and patch
+@@ -448,14 +448,14 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
+ */
+ if (!surf.mode) {
+ uint32_t *ib = p->ib.ptr;
+- unsigned long tmp, nby, bsize, size, min = 0;
++ u64 tmp, nby, bsize, size, min = 0;
+
+ /* find the height the ddx wants */
+ if (surf.nby > 8) {
+ min = surf.nby - 8;
+ }
+ bsize = radeon_bo_size(track->cb_color_bo[id]);
+- tmp = track->cb_color_bo_offset[id] << 8;
++ tmp = (u64)track->cb_color_bo_offset[id] << 8;
+ for (nby = surf.nby; nby > min; nby--) {
+ size = nby * surf.nbx * surf.bpe * surf.nsamples;
+ if ((tmp + size * mslice) <= bsize) {
+@@ -467,7 +467,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
+ slice = ((nby * surf.nbx) / 64) - 1;
+ if (!evergreen_surface_check(p, &surf, "cb")) {
+ /* check if this one works */
+- tmp += surf.layer_size * mslice;
++ tmp += (u64)surf.layer_size * mslice;
+ if (tmp <= bsize) {
+ ib[track->cb_color_slice_idx[id]] = slice;
+ goto old_ddx_ok;
+@@ -476,9 +476,9 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
+ }
+ }
+ dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
+- "offset %d, max layer %d, bo size %ld, slice %d)\n",
++ "offset %llu, max layer %d, bo size %ld, slice %d)\n",
+ __func__, __LINE__, id, surf.layer_size,
+- track->cb_color_bo_offset[id] << 8, mslice,
++ (u64)track->cb_color_bo_offset[id] << 8, mslice,
+ radeon_bo_size(track->cb_color_bo[id]), slice);
+ dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
+ __func__, __LINE__, surf.nbx, surf.nby,
+@@ -562,7 +562,7 @@ static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
+ struct evergreen_cs_track *track = p->track;
+ struct eg_surface surf;
+ unsigned pitch, slice, mslice;
+- unsigned long offset;
++ u64 offset;
+ int r;
+
+ mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
+@@ -608,18 +608,18 @@ static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
+ return r;
+ }
+
+- offset = track->db_s_read_offset << 8;
++ offset = (u64)track->db_s_read_offset << 8;
+ if (offset & (surf.base_align - 1)) {
+- dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
++ dev_warn(p->dev, "%s:%d stencil read bo base %llu not aligned with %ld\n",
+ __func__, __LINE__, offset, surf.base_align);
+ return -EINVAL;
+ }
+- offset += surf.layer_size * mslice;
++ offset += (u64)surf.layer_size * mslice;
+ if (offset > radeon_bo_size(track->db_s_read_bo)) {
+ dev_warn(p->dev, "%s:%d stencil read bo too small (layer size %d, "
+- "offset %ld, max layer %d, bo size %ld)\n",
++ "offset %llu, max layer %d, bo size %ld)\n",
+ __func__, __LINE__, surf.layer_size,
+- (unsigned long)track->db_s_read_offset << 8, mslice,
++ (u64)track->db_s_read_offset << 8, mslice,
+ radeon_bo_size(track->db_s_read_bo));
+ dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
+ __func__, __LINE__, track->db_depth_size,
+@@ -627,18 +627,18 @@ static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
+ return -EINVAL;
+ }
+
+- offset = track->db_s_write_offset << 8;
++ offset = (u64)track->db_s_write_offset << 8;
+ if (offset & (surf.base_align - 1)) {
+- dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
++ dev_warn(p->dev, "%s:%d stencil write bo base %llu not aligned with %ld\n",
+ __func__, __LINE__, offset, surf.base_align);
+ return -EINVAL;
+ }
+- offset += surf.layer_size * mslice;
++ offset += (u64)surf.layer_size * mslice;
+ if (offset > radeon_bo_size(track->db_s_write_bo)) {
+ dev_warn(p->dev, "%s:%d stencil write bo too small (layer size %d, "
+- "offset %ld, max layer %d, bo size %ld)\n",
++ "offset %llu, max layer %d, bo size %ld)\n",
+ __func__, __LINE__, surf.layer_size,
+- (unsigned long)track->db_s_write_offset << 8, mslice,
++ (u64)track->db_s_write_offset << 8, mslice,
+ radeon_bo_size(track->db_s_write_bo));
+ return -EINVAL;
+ }
+@@ -659,7 +659,7 @@ static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
+ struct evergreen_cs_track *track = p->track;
+ struct eg_surface surf;
+ unsigned pitch, slice, mslice;
+- unsigned long offset;
++ u64 offset;
+ int r;
+
+ mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
+@@ -706,34 +706,34 @@ static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
+ return r;
+ }
+
+- offset = track->db_z_read_offset << 8;
++ offset = (u64)track->db_z_read_offset << 8;
+ if (offset & (surf.base_align - 1)) {
+- dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
++ dev_warn(p->dev,
"%s:%d stencil read bo base %llu not aligned with %ld\n", + __func__, __LINE__, offset, surf.base_align); + return -EINVAL; + } +- offset += surf.layer_size * mslice; ++ offset += (u64)surf.layer_size * mslice; + if (offset > radeon_bo_size(track->db_z_read_bo)) { + dev_warn(p->dev, "%s:%d depth read bo too small (layer size %d, " +- "offset %ld, max layer %d, bo size %ld)\n", ++ "offset %llu, max layer %d, bo size %ld)\n", + __func__, __LINE__, surf.layer_size, +- (unsigned long)track->db_z_read_offset << 8, mslice, ++ (u64)track->db_z_read_offset << 8, mslice, + radeon_bo_size(track->db_z_read_bo)); + return -EINVAL; + } + +- offset = track->db_z_write_offset << 8; ++ offset = (u64)track->db_z_write_offset << 8; + if (offset & (surf.base_align - 1)) { +- dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n", ++ dev_warn(p->dev, "%s:%d stencil write bo base %llu not aligned with %ld\n", + __func__, __LINE__, offset, surf.base_align); + return -EINVAL; + } +- offset += surf.layer_size * mslice; ++ offset += (u64)surf.layer_size * mslice; + if (offset > radeon_bo_size(track->db_z_write_bo)) { + dev_warn(p->dev, "%s:%d depth write bo too small (layer size %d, " +- "offset %ld, max layer %d, bo size %ld)\n", ++ "offset %llu, max layer %d, bo size %ld)\n", + __func__, __LINE__, surf.layer_size, +- (unsigned long)track->db_z_write_offset << 8, mslice, ++ (u64)track->db_z_write_offset << 8, mslice, + radeon_bo_size(track->db_z_write_bo)); + return -EINVAL; + } +diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c +index 061396e7fa0f9c..53c7273eb6a5cf 100644 +--- a/drivers/gpu/drm/radeon/radeon_atombios.c ++++ b/drivers/gpu/drm/radeon/radeon_atombios.c +@@ -1716,26 +1716,29 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct + fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record; + if (fake_edid_record->ucFakeEDIDLength) { + struct edid *edid; +- int edid_size = +- max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength); +- edid = kmalloc(edid_size, GFP_KERNEL); ++ int edid_size; ++ ++ if (fake_edid_record->ucFakeEDIDLength == 128) ++ edid_size = fake_edid_record->ucFakeEDIDLength; ++ else ++ edid_size = fake_edid_record->ucFakeEDIDLength * 128; ++ edid = kmemdup(&fake_edid_record->ucFakeEDIDString[0], ++ edid_size, GFP_KERNEL); + if (edid) { +- memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0], +- fake_edid_record->ucFakeEDIDLength); +- + if (drm_edid_is_valid(edid)) { + rdev->mode_info.bios_hardcoded_edid = edid; + rdev->mode_info.bios_hardcoded_edid_size = edid_size; +- } else ++ } else { + kfree(edid); ++ } + } ++ record += struct_size(fake_edid_record, ++ ucFakeEDIDString, ++ edid_size); ++ } else { ++ /* empty fake edid record must be 3 bytes long */ ++ record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1; + } +- record += fake_edid_record->ucFakeEDIDLength ? 
+- struct_size(fake_edid_record, +- ucFakeEDIDString, +- fake_edid_record->ucFakeEDIDLength) : +- /* empty fake edid record must be 3 bytes long */ +- sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1; + break; + case LCD_PANEL_RESOLUTION_RECORD_TYPE: + panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record; +diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +index 341550199111f9..89bc86d620146c 100644 +--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c ++++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +@@ -435,6 +435,8 @@ static void dw_hdmi_rk3328_setup_hpd(struct dw_hdmi *dw_hdmi, void *data) + HIWORD_UPDATE(RK3328_HDMI_SDAIN_MSK | RK3328_HDMI_SCLIN_MSK, + RK3328_HDMI_SDAIN_MSK | RK3328_HDMI_SCLIN_MSK | + RK3328_HDMI_HPD_IOE)); ++ ++ dw_hdmi_rk3328_read_hpd(dw_hdmi, data); + } + + static const struct dw_hdmi_phy_ops rk3228_hdmi_phy_ops = { +diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +index 4b338cb89d32d4..c6fbfc0baeccd4 100644 +--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c ++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +@@ -381,8 +381,8 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win, + if (info->is_yuv) + is_yuv = true; + +- if (dst_w > 3840) { +- DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n"); ++ if (dst_w > 4096) { ++ DRM_DEV_ERROR(vop->dev, "Maximum dst width (4096) exceeded\n"); + return; + } + +diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c +index e8523abef27a50..4d2db079ad4ff3 100644 +--- a/drivers/gpu/drm/stm/drv.c ++++ b/drivers/gpu/drm/stm/drv.c +@@ -203,12 +203,14 @@ static int stm_drm_platform_probe(struct platform_device *pdev) + + ret = drm_dev_register(ddev, 0); + if (ret) +- goto err_put; ++ goto err_unload; + + drm_fbdev_dma_setup(ddev, 16); + + return 0; + ++err_unload: ++ drv_unload(ddev); + err_put: + drm_dev_put(ddev); + +diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c +index 5576fdae496233..5aec1e58c968c2 100644 +--- a/drivers/gpu/drm/stm/ltdc.c ++++ b/drivers/gpu/drm/stm/ltdc.c +@@ -1580,6 +1580,8 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev, + ARRAY_SIZE(ltdc_drm_fmt_ycbcr_sp) + + ARRAY_SIZE(ltdc_drm_fmt_ycbcr_fp)) * + sizeof(*formats), GFP_KERNEL); ++ if (!formats) ++ return NULL; + + for (i = 0; i < ldev->caps.pix_fmt_nb; i++) { + drm_fmt = ldev->caps.pix_fmt_drm[i]; +diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c +index 643754fa6a8ad0..c6e986f71a26f8 100644 +--- a/drivers/gpu/drm/vc4/vc4_hdmi.c ++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c +@@ -458,6 +458,7 @@ static int vc4_hdmi_connector_detect_ctx(struct drm_connector *connector, + { + struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector); + enum drm_connector_status status = connector_status_disconnected; ++ int ret; + + /* + * NOTE: This function should really take vc4_hdmi->mutex, but +@@ -470,7 +471,12 @@ static int vc4_hdmi_connector_detect_ctx(struct drm_connector *connector, + * the lock for now. 
+ */ + +- WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev)); ++ ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev); ++ if (ret) { ++ drm_err_once(connector->dev, "Failed to retain HDMI power domain: %d\n", ++ ret); ++ return connector_status_unknown; ++ } + + if (vc4_hdmi->hpd_gpio) { + if (gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +index ae796e0c64aa52..fdc34283eeb97f 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +@@ -331,6 +331,8 @@ void *vmw_bo_map_and_cache(struct vmw_bo *vbo) + void *virtual; + int ret; + ++ atomic_inc(&vbo->map_count); ++ + virtual = ttm_kmap_obj_virtual(&vbo->map, ¬_used); + if (virtual) + return virtual; +@@ -353,11 +355,17 @@ void *vmw_bo_map_and_cache(struct vmw_bo *vbo) + */ + void vmw_bo_unmap(struct vmw_bo *vbo) + { ++ int map_count; ++ + if (vbo->map.bo == NULL) + return; + +- ttm_bo_kunmap(&vbo->map); +- vbo->map.bo = NULL; ++ map_count = atomic_dec_return(&vbo->map_count); ++ ++ if (!map_count) { ++ ttm_bo_kunmap(&vbo->map); ++ vbo->map.bo = NULL; ++ } + } + + +@@ -390,6 +398,7 @@ static int vmw_bo_init(struct vmw_private *dev_priv, + BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3); + vmw_bo->tbo.priority = 3; + vmw_bo->res_tree = RB_ROOT; ++ atomic_set(&vmw_bo->map_count, 0); + + params->size = ALIGN(params->size, PAGE_SIZE); + drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size); +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h +index f349642e6190d6..156ea612fc2a48 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h +@@ -68,6 +68,8 @@ struct vmw_bo_params { + * @map: Kmap object for semi-persistent mappings + * @res_tree: RB tree of resources using this buffer object as a backing MOB + * @res_prios: Eviction priority counts for attached resources ++ * @map_count: The number of currently active maps. Will differ from the ++ * cpu_writers because it includes kernel maps. + * @cpu_writers: Number of synccpu write grabs. Protected by reservation when + * increased. May be decreased without reservation. + * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB +@@ -86,6 +88,7 @@ struct vmw_bo { + struct rb_root res_tree; + u32 res_prios[TTM_MAX_BO_PRIORITY]; + ++ atomic_t map_count; + atomic_t cpu_writers; + /* Not ref-counted. 
Protected by binding_mutex */ + struct vmw_resource *dx_query_ctx; +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c +index 5db26a8af7728d..18b5cd0234d213 100644 +--- a/drivers/hid/wacom_wac.c ++++ b/drivers/hid/wacom_wac.c +@@ -2368,6 +2368,9 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev, + wacom_map_usage(input, usage, field, EV_KEY, BTN_STYLUS3, 0); + features->quirks &= ~WACOM_QUIRK_PEN_BUTTON3; + break; ++ case WACOM_HID_WD_SEQUENCENUMBER: ++ wacom_wac->hid_data.sequence_number = -1; ++ break; + } + } + +@@ -2492,9 +2495,15 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field + wacom_wac->hid_data.barrelswitch3 = value; + return; + case WACOM_HID_WD_SEQUENCENUMBER: +- if (wacom_wac->hid_data.sequence_number != value) +- hid_warn(hdev, "Dropped %hu packets", (unsigned short)(value - wacom_wac->hid_data.sequence_number)); ++ if (wacom_wac->hid_data.sequence_number != value && ++ wacom_wac->hid_data.sequence_number >= 0) { ++ int sequence_size = field->logical_maximum - field->logical_minimum + 1; ++ int drop_count = (value - wacom_wac->hid_data.sequence_number) % sequence_size; ++ hid_warn(hdev, "Dropped %d packets", drop_count); ++ } + wacom_wac->hid_data.sequence_number = value + 1; ++ if (wacom_wac->hid_data.sequence_number > field->logical_maximum) ++ wacom_wac->hid_data.sequence_number = field->logical_minimum; + return; + } + +diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h +index 57e185f18d53da..61073fe81ead22 100644 +--- a/drivers/hid/wacom_wac.h ++++ b/drivers/hid/wacom_wac.h +@@ -324,7 +324,7 @@ struct hid_data { + int bat_connected; + int ps_connected; + bool pad_input_event_flag; +- unsigned short sequence_number; ++ int sequence_number; + ktime_t time_delayed; + }; + +diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c +index aa38c45adc09e2..0ccb5eb596fc40 100644 +--- a/drivers/hwmon/max16065.c ++++ b/drivers/hwmon/max16065.c +@@ -79,7 +79,7 @@ static const bool max16065_have_current[] = { + }; + + struct max16065_data { +- enum chips type; ++ enum chips chip; + struct i2c_client *client; + const struct attribute_group *groups[4]; + struct mutex update_lock; +@@ -114,9 +114,10 @@ static inline int LIMIT_TO_MV(int limit, int range) + return limit * range / 256; + } + +-static inline int MV_TO_LIMIT(int mv, int range) ++static inline int MV_TO_LIMIT(unsigned long mv, int range) + { +- return clamp_val(DIV_ROUND_CLOSEST(mv * 256, range), 0, 255); ++ mv = clamp_val(mv, 0, ULONG_MAX / 256); ++ return DIV_ROUND_CLOSEST(clamp_val(mv * 256, 0, range * 255), range); + } + + static inline int ADC_TO_CURR(int adc, int gain) +@@ -161,10 +162,17 @@ static struct max16065_data *max16065_update_device(struct device *dev) + MAX16065_CURR_SENSE); + } + +- for (i = 0; i < DIV_ROUND_UP(data->num_adc, 8); i++) ++ for (i = 0; i < 2; i++) + data->fault[i] + = i2c_smbus_read_byte_data(client, MAX16065_FAULT(i)); + ++ /* ++ * MAX16067 and MAX16068 have separate undervoltage and ++ * overvoltage alarm bits. Squash them together. 
++ */ ++ if (data->chip == max16067 || data->chip == max16068) ++ data->fault[0] |= data->fault[1]; ++ + data->last_updated = jiffies; + data->valid = true; + } +@@ -493,8 +501,6 @@ static const struct attribute_group max16065_max_group = { + .is_visible = max16065_secondary_is_visible, + }; + +-static const struct i2c_device_id max16065_id[]; +- + static int max16065_probe(struct i2c_client *client) + { + struct i2c_adapter *adapter = client->adapter; +@@ -505,7 +511,7 @@ static int max16065_probe(struct i2c_client *client) + bool have_secondary; /* true if chip has secondary limits */ + bool secondary_is_max = false; /* secondary limits reflect max */ + int groups = 0; +- const struct i2c_device_id *id = i2c_match_id(max16065_id, client); ++ enum chips chip = (uintptr_t)i2c_get_match_data(client); + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA + | I2C_FUNC_SMBUS_READ_WORD_DATA)) +@@ -515,12 +521,13 @@ static int max16065_probe(struct i2c_client *client) + if (unlikely(!data)) + return -ENOMEM; + ++ data->chip = chip; + data->client = client; + mutex_init(&data->update_lock); + +- data->num_adc = max16065_num_adc[id->driver_data]; +- data->have_current = max16065_have_current[id->driver_data]; +- have_secondary = max16065_have_secondary[id->driver_data]; ++ data->num_adc = max16065_num_adc[chip]; ++ data->have_current = max16065_have_current[chip]; ++ have_secondary = max16065_have_secondary[chip]; + + if (have_secondary) { + val = i2c_smbus_read_byte_data(client, MAX16065_SW_ENABLE); +diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c +index ef75b63f5894e5..b5352900463fb9 100644 +--- a/drivers/hwmon/ntc_thermistor.c ++++ b/drivers/hwmon/ntc_thermistor.c +@@ -62,6 +62,7 @@ static const struct platform_device_id ntc_thermistor_id[] = { + [NTC_SSG1404001221] = { "ssg1404_001221", TYPE_NCPXXWB473 }, + [NTC_LAST] = { }, + }; ++MODULE_DEVICE_TABLE(platform, ntc_thermistor_id); + + /* + * A compensation table should be sorted by the values of .ohm +diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c +index 8311e1028ddb03..f3312fbcdc0f82 100644 +--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c ++++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c +@@ -255,6 +255,7 @@ void tmc_free_sg_table(struct tmc_sg_table *sg_table) + { + tmc_free_table_pages(sg_table); + tmc_free_data_pages(sg_table); ++ kfree(sg_table); + } + EXPORT_SYMBOL_GPL(tmc_free_sg_table); + +@@ -336,7 +337,6 @@ struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev, + rc = tmc_alloc_table_pages(sg_table); + if (rc) { + tmc_free_sg_table(sg_table); +- kfree(sg_table); + return ERR_PTR(rc); + } + +diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c +index 5511fd46a65eae..83e6714901b289 100644 +--- a/drivers/i2c/busses/i2c-aspeed.c ++++ b/drivers/i2c/busses/i2c-aspeed.c +@@ -170,6 +170,13 @@ struct aspeed_i2c_bus { + + static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus); + ++/* precondition: bus.lock has been acquired. 
*/ ++static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus) ++{ ++ bus->master_state = ASPEED_I2C_MASTER_STOP; ++ writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG); ++} ++ + static int aspeed_i2c_recover_bus(struct aspeed_i2c_bus *bus) + { + unsigned long time_left, flags; +@@ -187,7 +194,7 @@ static int aspeed_i2c_recover_bus(struct aspeed_i2c_bus *bus) + command); + + reinit_completion(&bus->cmd_complete); +- writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG); ++ aspeed_i2c_do_stop(bus); + spin_unlock_irqrestore(&bus->lock, flags); + + time_left = wait_for_completion_timeout( +@@ -390,13 +397,6 @@ static void aspeed_i2c_do_start(struct aspeed_i2c_bus *bus) + writel(command, bus->base + ASPEED_I2C_CMD_REG); + } + +-/* precondition: bus.lock has been acquired. */ +-static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus) +-{ +- bus->master_state = ASPEED_I2C_MASTER_STOP; +- writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG); +-} +- + /* precondition: bus.lock has been acquired. */ + static void aspeed_i2c_next_msg_or_stop(struct aspeed_i2c_bus *bus) + { +diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c +index 1dc1ceaa44439f..8c16c469557475 100644 +--- a/drivers/i2c/busses/i2c-isch.c ++++ b/drivers/i2c/busses/i2c-isch.c +@@ -99,8 +99,7 @@ static int sch_transaction(void) + if (retries > MAX_RETRIES) { + dev_err(&sch_adapter.dev, "SMBus Timeout!\n"); + result = -ETIMEDOUT; +- } +- if (temp & 0x04) { ++ } else if (temp & 0x04) { + result = -EIO; + dev_dbg(&sch_adapter.dev, "Bus collision! SMBus may be " + "locked until next hard reset. (sorry!)\n"); +diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c +index 1c08c0921ee712..4d755ffc3f4148 100644 +--- a/drivers/iio/adc/ad7606.c ++++ b/drivers/iio/adc/ad7606.c +@@ -215,9 +215,9 @@ static int ad7606_write_os_hw(struct iio_dev *indio_dev, int val) + struct ad7606_state *st = iio_priv(indio_dev); + DECLARE_BITMAP(values, 3); + +- values[0] = val; ++ values[0] = val & GENMASK(2, 0); + +- gpiod_set_array_value(ARRAY_SIZE(values), st->gpio_os->desc, ++ gpiod_set_array_value(st->gpio_os->ndescs, st->gpio_os->desc, + st->gpio_os->info, values); + + /* AD7616 requires a reset to update value */ +@@ -422,7 +422,7 @@ static int ad7606_request_gpios(struct ad7606_state *st) + return PTR_ERR(st->gpio_range); + + st->gpio_standby = devm_gpiod_get_optional(dev, "standby", +- GPIOD_OUT_HIGH); ++ GPIOD_OUT_LOW); + if (IS_ERR(st->gpio_standby)) + return PTR_ERR(st->gpio_standby); + +@@ -665,7 +665,7 @@ static int ad7606_suspend(struct device *dev) + + if (st->gpio_standby) { + gpiod_set_value(st->gpio_range, 1); +- gpiod_set_value(st->gpio_standby, 0); ++ gpiod_set_value(st->gpio_standby, 1); + } + + return 0; +diff --git a/drivers/iio/adc/ad7606_spi.c b/drivers/iio/adc/ad7606_spi.c +index 263a778bcf2539..287a0591533b6a 100644 +--- a/drivers/iio/adc/ad7606_spi.c ++++ b/drivers/iio/adc/ad7606_spi.c +@@ -249,8 +249,9 @@ static int ad7616_sw_mode_config(struct iio_dev *indio_dev) + static int ad7606B_sw_mode_config(struct iio_dev *indio_dev) + { + struct ad7606_state *st = iio_priv(indio_dev); +- unsigned long os[3] = {1}; ++ DECLARE_BITMAP(os, 3); + ++ bitmap_fill(os, 3); + /* + * Software mode is enabled when all three oversampling + * pins are set to high. 
If oversampling gpios are defined +@@ -258,7 +259,7 @@ static int ad7606B_sw_mode_config(struct iio_dev *indio_dev) + * otherwise, they must be hardwired to VDD + */ + if (st->gpio_os) { +- gpiod_set_array_value(ARRAY_SIZE(os), ++ gpiod_set_array_value(st->gpio_os->ndescs, + st->gpio_os->desc, st->gpio_os->info, os); + } + /* OS of 128 and 256 are available only in software mode */ +diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c +index 500f56834b01f6..a6bf689833dad7 100644 +--- a/drivers/iio/chemical/bme680_core.c ++++ b/drivers/iio/chemical/bme680_core.c +@@ -10,6 +10,7 @@ + */ + #include + #include ++#include + #include + #include + #include +@@ -52,6 +53,7 @@ struct bme680_calib { + struct bme680_data { + struct regmap *regmap; + struct bme680_calib bme680; ++ struct mutex lock; /* Protect multiple serial R/W ops to device. */ + u8 oversampling_temp; + u8 oversampling_press; + u8 oversampling_humid; +@@ -827,6 +829,8 @@ static int bme680_read_raw(struct iio_dev *indio_dev, + { + struct bme680_data *data = iio_priv(indio_dev); + ++ guard(mutex)(&data->lock); ++ + switch (mask) { + case IIO_CHAN_INFO_PROCESSED: + switch (chan->type) { +@@ -871,6 +875,8 @@ static int bme680_write_raw(struct iio_dev *indio_dev, + { + struct bme680_data *data = iio_priv(indio_dev); + ++ guard(mutex)(&data->lock); ++ + if (val2 != 0) + return -EINVAL; + +@@ -967,6 +973,7 @@ int bme680_core_probe(struct device *dev, struct regmap *regmap, + name = bme680_match_acpi_device(dev); + + data = iio_priv(indio_dev); ++ mutex_init(&data->lock); + dev_set_drvdata(dev, indio_dev); + data->regmap = regmap; + indio_dev->name = name; +diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c +index eb706d0bf70bc0..3b7984fd129d7d 100644 +--- a/drivers/iio/magnetometer/ak8975.c ++++ b/drivers/iio/magnetometer/ak8975.c +@@ -204,7 +204,6 @@ static long ak09912_raw_to_gauss(u16 data) + + /* Compatible Asahi Kasei Compass parts */ + enum asahi_compass_chipset { +- AKXXXX = 0, + AK8975, + AK8963, + AK09911, +@@ -248,7 +247,7 @@ struct ak_def { + }; + + static const struct ak_def ak_def_array[] = { +- { ++ [AK8975] = { + .type = AK8975, + .raw_to_gauss = ak8975_raw_to_gauss, + .range = 4096, +@@ -273,7 +272,7 @@ static const struct ak_def ak_def_array[] = { + AK8975_REG_HYL, + AK8975_REG_HZL}, + }, +- { ++ [AK8963] = { + .type = AK8963, + .raw_to_gauss = ak8963_09911_raw_to_gauss, + .range = 8190, +@@ -298,7 +297,7 @@ static const struct ak_def ak_def_array[] = { + AK8975_REG_HYL, + AK8975_REG_HZL}, + }, +- { ++ [AK09911] = { + .type = AK09911, + .raw_to_gauss = ak8963_09911_raw_to_gauss, + .range = 8192, +@@ -323,7 +322,7 @@ static const struct ak_def ak_def_array[] = { + AK09912_REG_HYL, + AK09912_REG_HZL}, + }, +- { ++ [AK09912] = { + .type = AK09912, + .raw_to_gauss = ak09912_raw_to_gauss, + .range = 32752, +@@ -348,7 +347,7 @@ static const struct ak_def ak_def_array[] = { + AK09912_REG_HYL, + AK09912_REG_HZL}, + }, +- { ++ [AK09916] = { + .type = AK09916, + .raw_to_gauss = ak09912_raw_to_gauss, + .range = 32752, +@@ -813,13 +812,13 @@ static const struct iio_info ak8975_info = { + }; + + static const struct acpi_device_id ak_acpi_match[] = { +- {"AK8975", AK8975}, +- {"AK8963", AK8963}, +- {"INVN6500", AK8963}, +- {"AK009911", AK09911}, +- {"AK09911", AK09911}, +- {"AKM9911", AK09911}, +- {"AK09912", AK09912}, ++ {"AK8975", (kernel_ulong_t)&ak_def_array[AK8975] }, ++ {"AK8963", (kernel_ulong_t)&ak_def_array[AK8963] }, ++ {"INVN6500", 
(kernel_ulong_t)&ak_def_array[AK8963] }, ++ {"AK009911", (kernel_ulong_t)&ak_def_array[AK09911] }, ++ {"AK09911", (kernel_ulong_t)&ak_def_array[AK09911] }, ++ {"AKM9911", (kernel_ulong_t)&ak_def_array[AK09911] }, ++ {"AK09912", (kernel_ulong_t)&ak_def_array[AK09912] }, + { } + }; + MODULE_DEVICE_TABLE(acpi, ak_acpi_match); +@@ -883,10 +882,7 @@ static int ak8975_probe(struct i2c_client *client) + struct iio_dev *indio_dev; + struct gpio_desc *eoc_gpiod; + struct gpio_desc *reset_gpiod; +- const void *match; +- unsigned int i; + int err; +- enum asahi_compass_chipset chipset; + const char *name = NULL; + + /* +@@ -928,27 +924,15 @@ static int ak8975_probe(struct i2c_client *client) + return err; + + /* id will be NULL when enumerated via ACPI */ +- match = device_get_match_data(&client->dev); +- if (match) { +- chipset = (uintptr_t)match; +- name = dev_name(&client->dev); +- } else if (id) { +- chipset = (enum asahi_compass_chipset)(id->driver_data); +- name = id->name; +- } else +- return -ENOSYS; +- +- for (i = 0; i < ARRAY_SIZE(ak_def_array); i++) +- if (ak_def_array[i].type == chipset) +- break; +- +- if (i == ARRAY_SIZE(ak_def_array)) { +- dev_err(&client->dev, "AKM device type unsupported: %d\n", +- chipset); ++ data->def = i2c_get_match_data(client); ++ if (!data->def) + return -ENODEV; +- } + +- data->def = &ak_def_array[i]; ++ /* If enumerated via firmware node, fix the ABI */ ++ if (dev_fwnode(&client->dev)) ++ name = dev_name(&client->dev); ++ else ++ name = id->name; + + /* Fetch the regulators */ + data->vdd = devm_regulator_get(&client->dev, "vdd"); +@@ -1077,28 +1061,27 @@ static DEFINE_RUNTIME_DEV_PM_OPS(ak8975_dev_pm_ops, ak8975_runtime_suspend, + ak8975_runtime_resume, NULL); + + static const struct i2c_device_id ak8975_id[] = { +- {"ak8975", AK8975}, +- {"ak8963", AK8963}, +- {"AK8963", AK8963}, +- {"ak09911", AK09911}, +- {"ak09912", AK09912}, +- {"ak09916", AK09916}, ++ {"ak8975", (kernel_ulong_t)&ak_def_array[AK8975] }, ++ {"ak8963", (kernel_ulong_t)&ak_def_array[AK8963] }, ++ {"AK8963", (kernel_ulong_t)&ak_def_array[AK8963] }, ++ {"ak09911", (kernel_ulong_t)&ak_def_array[AK09911] }, ++ {"ak09912", (kernel_ulong_t)&ak_def_array[AK09912] }, ++ {"ak09916", (kernel_ulong_t)&ak_def_array[AK09916] }, + {} + }; + + MODULE_DEVICE_TABLE(i2c, ak8975_id); + + static const struct of_device_id ak8975_of_match[] = { +- { .compatible = "asahi-kasei,ak8975", }, +- { .compatible = "ak8975", }, +- { .compatible = "asahi-kasei,ak8963", }, +- { .compatible = "ak8963", }, +- { .compatible = "asahi-kasei,ak09911", }, +- { .compatible = "ak09911", }, +- { .compatible = "asahi-kasei,ak09912", }, +- { .compatible = "ak09912", }, +- { .compatible = "asahi-kasei,ak09916", }, +- { .compatible = "ak09916", }, ++ { .compatible = "asahi-kasei,ak8975", .data = &ak_def_array[AK8975] }, ++ { .compatible = "ak8975", .data = &ak_def_array[AK8975] }, ++ { .compatible = "asahi-kasei,ak8963", .data = &ak_def_array[AK8963] }, ++ { .compatible = "ak8963", .data = &ak_def_array[AK8963] }, ++ { .compatible = "asahi-kasei,ak09911", .data = &ak_def_array[AK09911] }, ++ { .compatible = "ak09911", .data = &ak_def_array[AK09911] }, ++ { .compatible = "asahi-kasei,ak09912", .data = &ak_def_array[AK09912] }, ++ { .compatible = "ak09912", .data = &ak_def_array[AK09912] }, ++ { .compatible = "asahi-kasei,ak09916", .data = &ak_def_array[AK09916] }, + {} + }; + MODULE_DEVICE_TABLE(of, ak8975_of_match); +diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c +index b7251ed7a8dfb2..0b88203720b059 
100644 +--- a/drivers/infiniband/core/cache.c ++++ b/drivers/infiniband/core/cache.c +@@ -1640,8 +1640,10 @@ int ib_cache_setup_one(struct ib_device *device) + + rdma_for_each_port (device, p) { + err = ib_cache_update(device, p, true, true, true); +- if (err) ++ if (err) { ++ gid_table_cleanup_one(device); + return err; ++ } + } + + return 0; +diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c +index 2d09d1be38f19b..3e4941754b48d0 100644 +--- a/drivers/infiniband/core/iwcm.c ++++ b/drivers/infiniband/core/iwcm.c +@@ -1191,7 +1191,7 @@ static int __init iw_cm_init(void) + if (ret) + return ret; + +- iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", 0); ++ iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", WQ_MEM_RECLAIM); + if (!iwcm_wq) + goto err_alloc; + +diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c +index 040ba2224f9ff6..b3757c6a0457a1 100644 +--- a/drivers/infiniband/hw/cxgb4/cm.c ++++ b/drivers/infiniband/hw/cxgb4/cm.c +@@ -1222,6 +1222,8 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb) + int ret; + + ep = lookup_atid(t, atid); ++ if (!ep) ++ return -EINVAL; + + pr_debug("ep %p tid %u snd_isn %u rcv_isn %u\n", ep, tid, + be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn)); +@@ -2279,6 +2281,9 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) + int ret = 0; + + ep = lookup_atid(t, atid); ++ if (!ep) ++ return -EINVAL; ++ + la = (struct sockaddr_in *)&ep->com.local_addr; + ra = (struct sockaddr_in *)&ep->com.remote_addr; + la6 = (struct sockaddr_in6 *)&ep->com.local_addr; +diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c +index c317947563fbc6..b010c4209ea381 100644 +--- a/drivers/infiniband/hw/erdma/erdma_verbs.c ++++ b/drivers/infiniband/hw/erdma/erdma_verbs.c +@@ -1540,11 +1540,31 @@ int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, + return ret; + } + ++static enum ib_qp_state query_qp_state(struct erdma_qp *qp) ++{ ++ switch (qp->attrs.state) { ++ case ERDMA_QP_STATE_IDLE: ++ return IB_QPS_INIT; ++ case ERDMA_QP_STATE_RTR: ++ return IB_QPS_RTR; ++ case ERDMA_QP_STATE_RTS: ++ return IB_QPS_RTS; ++ case ERDMA_QP_STATE_CLOSING: ++ return IB_QPS_ERR; ++ case ERDMA_QP_STATE_TERMINATE: ++ return IB_QPS_ERR; ++ case ERDMA_QP_STATE_ERROR: ++ return IB_QPS_ERR; ++ default: ++ return IB_QPS_ERR; ++ } ++} ++ + int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, + int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) + { +- struct erdma_qp *qp; + struct erdma_dev *dev; ++ struct erdma_qp *qp; + + if (ibqp && qp_attr && qp_init_attr) { + qp = to_eqp(ibqp); +@@ -1571,6 +1591,9 @@ int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, + + qp_init_attr->cap = qp_attr->cap; + ++ qp_attr->qp_state = query_qp_state(qp); ++ qp_attr->cur_qp_state = query_qp_state(qp); ++ + return 0; + } + +diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c +index c4ac06a3386969..7ebf80504fd125 100644 +--- a/drivers/infiniband/hw/hns/hns_roce_hem.c ++++ b/drivers/infiniband/hw/hns/hns_roce_hem.c +@@ -1098,9 +1098,9 @@ static bool hem_list_is_bottom_bt(int hopnum, int bt_level) + * @bt_level: base address table level + * @unit: ba entries per bt page + */ +-static u32 hem_list_calc_ba_range(int hopnum, int bt_level, int unit) ++static u64 hem_list_calc_ba_range(int hopnum, int bt_level, int unit) + { +- u32 step; ++ u64 step; + int max; + int i; + +@@ -1136,7 +1136,7 @@ int 
hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions, + { + struct hns_roce_buf_region *r; + int total = 0; +- int step; ++ u64 step; + int i; + + for (i = 0; i < region_cnt; i++) { +@@ -1167,7 +1167,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev, + int ret = 0; + int max_ofs; + int level; +- u32 step; ++ u64 step; + int end; + + if (hopnum <= 1) +@@ -1191,10 +1191,12 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev, + + /* config L1 bt to last bt and link them to corresponding parent */ + for (level = 1; level < hopnum; level++) { +- cur = hem_list_search_item(&mid_bt[level], offset); +- if (cur) { +- hem_ptrs[level] = cur; +- continue; ++ if (!hem_list_is_bottom_bt(hopnum, level)) { ++ cur = hem_list_search_item(&mid_bt[level], offset); ++ if (cur) { ++ hem_ptrs[level] = cur; ++ continue; ++ } + } + + step = hem_list_calc_ba_range(hopnum, level, unit); +@@ -1204,7 +1206,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev, + } + + start_aligned = (distance / step) * step + r->offset; +- end = min_t(int, start_aligned + step - 1, max_ofs); ++ end = min_t(u64, start_aligned + step - 1, max_ofs); + cur = hem_list_alloc_item(hr_dev, start_aligned, end, unit, + true); + if (!cur) { +@@ -1293,7 +1295,7 @@ static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base, + struct hns_roce_hem_item *hem, *temp_hem; + int total = 0; + int offset; +- int step; ++ u64 step; + + step = hem_list_calc_ba_range(r->hopnum, 1, unit); + if (step < 1) +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +index a49280e2df8ca0..8066750afab908 100644 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +@@ -1653,8 +1653,8 @@ static int hns_roce_hw_v2_query_counter(struct hns_roce_dev *hr_dev, + + for (i = 0; i < HNS_ROCE_HW_CNT_TOTAL && i < *num_counters; i++) { + bd_idx = i / CNT_PER_DESC; +- if (!(desc[bd_idx].flag & HNS_ROCE_CMD_FLAG_NEXT) && +- bd_idx != HNS_ROCE_HW_CNT_TOTAL / CNT_PER_DESC) ++ if (bd_idx != HNS_ROCE_HW_CNT_TOTAL / CNT_PER_DESC && ++ !(desc[bd_idx].flag & cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT))) + break; + + cnt_data = (__le64 *)&desc[bd_idx].data[0]; +@@ -2932,6 +2932,9 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev) + + static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev) + { ++ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) ++ free_mr_exit(hr_dev); ++ + hns_roce_function_clear(hr_dev); + + if (!hr_dev->is_vf) +@@ -4391,12 +4394,14 @@ static int config_qp_rq_buf(struct hns_roce_dev *hr_dev, + upper_32_bits(to_hr_hw_page_addr(mtts[0]))); + hr_reg_clear(qpc_mask, QPC_RQ_CUR_BLK_ADDR_H); + +- context->rq_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1])); +- qpc_mask->rq_nxt_blk_addr = 0; +- +- hr_reg_write(context, QPC_RQ_NXT_BLK_ADDR_H, +- upper_32_bits(to_hr_hw_page_addr(mtts[1]))); +- hr_reg_clear(qpc_mask, QPC_RQ_NXT_BLK_ADDR_H); ++ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { ++ context->rq_nxt_blk_addr = ++ cpu_to_le32(to_hr_hw_page_addr(mtts[1])); ++ qpc_mask->rq_nxt_blk_addr = 0; ++ hr_reg_write(context, QPC_RQ_NXT_BLK_ADDR_H, ++ upper_32_bits(to_hr_hw_page_addr(mtts[1]))); ++ hr_reg_clear(qpc_mask, QPC_RQ_NXT_BLK_ADDR_H); ++ } + + return 0; + } +@@ -6065,6 +6070,7 @@ static irqreturn_t abnormal_interrupt_basic(struct hns_roce_dev *hr_dev, + struct pci_dev *pdev = hr_dev->pci_dev; + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + const struct hnae3_ae_ops *ops = 
ae_dev->ops; ++ enum hnae3_reset_type reset_type; + irqreturn_t int_work = IRQ_NONE; + u32 int_en; + +@@ -6076,10 +6082,12 @@ static irqreturn_t abnormal_interrupt_basic(struct hns_roce_dev *hr_dev, + roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, + 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S); + ++ reset_type = hr_dev->is_vf ? ++ HNAE3_VF_FUNC_RESET : HNAE3_FUNC_RESET; ++ + /* Set reset level for reset_event() */ + if (ops->set_default_reset_request) +- ops->set_default_reset_request(ae_dev, +- HNAE3_FUNC_RESET); ++ ops->set_default_reset_request(ae_dev, reset_type); + if (ops->reset_event) + ops->reset_event(pdev, NULL); + +@@ -6149,7 +6157,7 @@ static u64 fmea_get_ram_res_addr(u32 res_type, __le64 *data) + res_type == ECC_RESOURCE_SCCC) + return le64_to_cpu(*data); + +- return le64_to_cpu(*data) << PAGE_SHIFT; ++ return le64_to_cpu(*data) << HNS_HW_PAGE_SHIFT; + } + + static int fmea_recover_others(struct hns_roce_dev *hr_dev, u32 res_type, +@@ -6777,9 +6785,6 @@ static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle, + hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT; + hns_roce_handle_device_err(hr_dev); + +- if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) +- free_mr_exit(hr_dev); +- + hns_roce_exit(hr_dev); + kfree(hr_dev->priv); + ib_dealloc_device(&hr_dev->ib_dev); +diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c +index bff00b3af41fbe..04063cfacae5fc 100644 +--- a/drivers/infiniband/hw/hns/hns_roce_qp.c ++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c +@@ -1379,19 +1379,19 @@ void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq) + __acquire(&send_cq->lock); + __acquire(&recv_cq->lock); + } else if (unlikely(send_cq != NULL && recv_cq == NULL)) { +- spin_lock_irq(&send_cq->lock); ++ spin_lock(&send_cq->lock); + __acquire(&recv_cq->lock); + } else if (unlikely(send_cq == NULL && recv_cq != NULL)) { +- spin_lock_irq(&recv_cq->lock); ++ spin_lock(&recv_cq->lock); + __acquire(&send_cq->lock); + } else if (send_cq == recv_cq) { +- spin_lock_irq(&send_cq->lock); ++ spin_lock(&send_cq->lock); + __acquire(&recv_cq->lock); + } else if (send_cq->cqn < recv_cq->cqn) { +- spin_lock_irq(&send_cq->lock); ++ spin_lock(&send_cq->lock); + spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); + } else { +- spin_lock_irq(&recv_cq->lock); ++ spin_lock(&recv_cq->lock); + spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); + } + } +@@ -1411,13 +1411,13 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq, + spin_unlock(&recv_cq->lock); + } else if (send_cq == recv_cq) { + __release(&recv_cq->lock); +- spin_unlock_irq(&send_cq->lock); ++ spin_unlock(&send_cq->lock); + } else if (send_cq->cqn < recv_cq->cqn) { + spin_unlock(&recv_cq->lock); +- spin_unlock_irq(&send_cq->lock); ++ spin_unlock(&send_cq->lock); + } else { + spin_unlock(&send_cq->lock); +- spin_unlock_irq(&recv_cq->lock); ++ spin_unlock(&recv_cq->lock); + } + } + +diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c +index ca3e89909ecf4c..38cecb28d322e4 100644 +--- a/drivers/infiniband/hw/irdma/verbs.c ++++ b/drivers/infiniband/hw/irdma/verbs.c +@@ -1347,7 +1347,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr, + if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) { + ibdev_err(&iwdev->ibdev, + "rd_atomic = %d, above max_hw_ird=%d\n", +- attr->max_rd_atomic, ++ attr->max_dest_rd_atomic, + dev->hw_attrs.max_hw_ird); + return -EINVAL; + } +diff --git a/drivers/infiniband/hw/mlx5/main.c 
b/drivers/infiniband/hw/mlx5/main.c +index 2d179bc56ce608..296af7a5c2794d 100644 +--- a/drivers/infiniband/hw/mlx5/main.c ++++ b/drivers/infiniband/hw/mlx5/main.c +@@ -539,7 +539,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u32 port_num, + if (!ndev) + goto out; + +- if (dev->lag_active) { ++ if (mlx5_lag_is_roce(mdev) || mlx5_lag_is_sriov(mdev)) { + rcu_read_lock(); + upper = netdev_master_upper_dev_get_rcu(ndev); + if (upper) { +diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c +index 50a1786231c77b..9e465cf99733ee 100644 +--- a/drivers/infiniband/hw/mlx5/mr.c ++++ b/drivers/infiniband/hw/mlx5/mr.c +@@ -48,6 +48,7 @@ enum { + MAX_PENDING_REG_MR = 8, + }; + ++#define MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS 4 + #define MLX5_UMR_ALIGN 2048 + + static void +@@ -715,6 +716,7 @@ mkey_cache_ent_from_rb_key(struct mlx5_ib_dev *dev, + { + struct rb_node *node = dev->cache.rb_root.rb_node; + struct mlx5_cache_ent *cur, *smallest = NULL; ++ u64 ndescs_limit; + int cmp; + + /* +@@ -733,10 +735,18 @@ mkey_cache_ent_from_rb_key(struct mlx5_ib_dev *dev, + return cur; + } + ++ /* ++ * Limit the usage of mkeys larger than twice the required size while ++ * also allowing the usage of smallest cache entry for small MRs. ++ */ ++ ndescs_limit = max_t(u64, rb_key.ndescs * 2, ++ MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS); ++ + return (smallest && + smallest->rb_key.access_mode == rb_key.access_mode && + smallest->rb_key.access_flags == rb_key.access_flags && +- smallest->rb_key.ats == rb_key.ats) ? ++ smallest->rb_key.ats == rb_key.ats && ++ smallest->rb_key.ndescs <= ndescs_limit) ? + smallest : + NULL; + } +@@ -986,7 +996,7 @@ int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev) + mlx5_mkey_cache_debugfs_init(dev); + mutex_lock(&cache->rb_lock); + for (i = 0; i <= mkey_cache_max_order(dev); i++) { +- rb_key.ndescs = 1 << (i + 2); ++ rb_key.ndescs = MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS << i; + ent = mlx5r_cache_create_ent_locked(dev, rb_key, true); + if (IS_ERR(ent)) { + ret = PTR_ERR(ent); +diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c +index 1aee62aa1515d4..82aa47efb8078d 100644 +--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c ++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c +@@ -626,6 +626,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) + */ + if (WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done)) + return; ++ clt_path->s.hb_missed_cnt = 0; + rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), + &imm_type, &imm_payload); + if (imm_type == RTRS_IO_RSP_IMM || +@@ -643,7 +644,6 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) + return rtrs_clt_recv_done(con, wc); + } else if (imm_type == RTRS_HB_ACK_IMM) { + WARN_ON(con->c.cid); +- clt_path->s.hb_missed_cnt = 0; + clt_path->s.hb_cur_latency = + ktime_sub(ktime_get(), clt_path->s.hb_last_sent); + if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) +@@ -670,6 +670,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) + /* + * Key invalidations from server side + */ ++ clt_path->s.hb_missed_cnt = 0; + WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE || + wc->wc_flags & IB_WC_WITH_IMM)); + WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done); +@@ -2341,6 +2342,12 @@ static int init_conns(struct rtrs_clt_path *clt_path) + if (err) + goto destroy; + } ++ ++ /* ++ * Set the cid to con_num - 1, since if we fail later, we want to stay in bounds. 
++ */ ++ cid = clt_path->s.con_num - 1; ++ + err = alloc_path_reqs(clt_path); + if (err) + goto destroy; +diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c +index 1d33efb8fb03be..94ac99a4f696e7 100644 +--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c ++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c +@@ -1229,6 +1229,7 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc) + */ + if (WARN_ON(wc->wr_cqe != &io_comp_cqe)) + return; ++ srv_path->s.hb_missed_cnt = 0; + err = rtrs_post_recv_empty(&con->c, &io_comp_cqe); + if (err) { + rtrs_err(s, "rtrs_post_recv(), err: %d\n", err); +diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c +index 61e8e43e9c2bbd..48981327440193 100644 +--- a/drivers/input/keyboard/adp5588-keys.c ++++ b/drivers/input/keyboard/adp5588-keys.c +@@ -627,7 +627,7 @@ static int adp5588_setup(struct adp5588_kpad *kpad) + + for (i = 0; i < KEYP_MAX_EVENT; i++) { + ret = adp5588_read(client, KEY_EVENTA); +- if (ret) ++ if (ret < 0) + return ret; + } + +diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h +index bad238f69a7afd..34d1f07ea4c304 100644 +--- a/drivers/input/serio/i8042-acpipnpio.h ++++ b/drivers/input/serio/i8042-acpipnpio.h +@@ -1120,6 +1120,43 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { + }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) + }, ++ /* ++ * Some TongFang barebones have touchpad and/or keyboard issues after ++ * suspend fixable with nomux + reset + noloop + nopnp. Luckily, none of ++ * them have an external PS/2 port so this can safely be set for all of ++ * them. ++ * TongFang barebones come with board_vendor and/or system_vendor set to ++ * a different value for each individual reseller. The only somewhat ++ * universal way to identify them is by board_name. ++ */ ++ { ++ .matches = { ++ DMI_MATCH(DMI_BOARD_NAME, "GM6XGxX"), ++ }, ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | ++ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) ++ }, ++ { ++ .matches = { ++ DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"), ++ }, ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | ++ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) ++ }, ++ { ++ .matches = { ++ DMI_MATCH(DMI_BOARD_NAME, "GMxXGxX"), ++ }, ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | ++ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) ++ }, ++ { ++ .matches = { ++ DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"), ++ }, ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | ++ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) ++ }, + /* + * A lot of modern Clevo barebones have touchpad and/or keyboard issues + * after suspend fixable with nomux + reset + noloop + nopnp. 
Luckily, +diff --git a/drivers/input/touchscreen/ilitek_ts_i2c.c b/drivers/input/touchscreen/ilitek_ts_i2c.c +index 2f872e95fbbade..e719f5da68bf50 100644 +--- a/drivers/input/touchscreen/ilitek_ts_i2c.c ++++ b/drivers/input/touchscreen/ilitek_ts_i2c.c +@@ -37,6 +37,8 @@ + #define ILITEK_TP_CMD_GET_MCU_VER 0x61 + #define ILITEK_TP_CMD_GET_IC_MODE 0xC0 + ++#define ILITEK_TP_I2C_REPORT_ID 0x48 ++ + #define REPORT_COUNT_ADDRESS 61 + #define ILITEK_SUPPORT_MAX_POINT 40 + +@@ -160,15 +162,19 @@ static int ilitek_process_and_report_v6(struct ilitek_ts_data *ts) + error = ilitek_i2c_write_and_read(ts, NULL, 0, 0, buf, 64); + if (error) { + dev_err(dev, "get touch info failed, err:%d\n", error); +- goto err_sync_frame; ++ return error; ++ } ++ ++ if (buf[0] != ILITEK_TP_I2C_REPORT_ID) { ++ dev_err(dev, "get touch info failed. Wrong id: 0x%02X\n", buf[0]); ++ return -EINVAL; + } + + report_max_point = buf[REPORT_COUNT_ADDRESS]; + if (report_max_point > ts->max_tp) { + dev_err(dev, "FW report max point:%d > panel info. max:%d\n", + report_max_point, ts->max_tp); +- error = -EINVAL; +- goto err_sync_frame; ++ return -EINVAL; + } + + count = DIV_ROUND_UP(report_max_point, packet_max_point); +@@ -178,7 +184,7 @@ static int ilitek_process_and_report_v6(struct ilitek_ts_data *ts) + if (error) { + dev_err(dev, "get touch info. failed, cnt:%d, err:%d\n", + count, error); +- goto err_sync_frame; ++ return error; + } + } + +@@ -203,10 +209,10 @@ static int ilitek_process_and_report_v6(struct ilitek_ts_data *ts) + ilitek_touch_down(ts, id, x, y); + } + +-err_sync_frame: + input_mt_sync_frame(input); + input_sync(input); +- return error; ++ ++ return 0; + } + + /* APIs of cmds for ILITEK Touch IC */ +diff --git a/drivers/interconnect/icc-clk.c b/drivers/interconnect/icc-clk.c +index d787f2ea36d97b..a91df709cfb2f3 100644 +--- a/drivers/interconnect/icc-clk.c ++++ b/drivers/interconnect/icc-clk.c +@@ -87,6 +87,7 @@ struct icc_provider *icc_clk_register(struct device *dev, + onecell = devm_kzalloc(dev, struct_size(onecell, nodes, 2 * num_clocks), GFP_KERNEL); + if (!onecell) + return ERR_PTR(-ENOMEM); ++ onecell->num_nodes = 2 * num_clocks; + + qp = devm_kzalloc(dev, struct_size(qp, clocks, num_clocks), GFP_KERNEL); + if (!qp) +@@ -133,8 +134,6 @@ struct icc_provider *icc_clk_register(struct device *dev, + onecell->nodes[j++] = node; + } + +- onecell->num_nodes = j; +- + ret = icc_provider_register(provider); + if (ret) + goto err; +diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c +index e9ef2e0a62f670..cbf0c46015125a 100644 +--- a/drivers/iommu/amd/io_pgtable_v2.c ++++ b/drivers/iommu/amd/io_pgtable_v2.c +@@ -50,7 +50,7 @@ static inline u64 set_pgtable_attr(u64 *page) + u64 prot; + + prot = IOMMU_PAGE_PRESENT | IOMMU_PAGE_RW | IOMMU_PAGE_USER; +- prot |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY; ++ prot |= IOMMU_PAGE_ACCESS; + + return (iommu_virt_to_phys(page) | prot); + } +diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c +index 6e6cb19c81f594..d4915893601979 100644 +--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c ++++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c +@@ -278,6 +278,20 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu) + u32 smr; + int i; + ++ /* ++ * MSM8998 LPASS SMMU reports 13 context banks, but accessing ++ * the last context bank crashes the system. 
++ */
++ if (of_device_is_compatible(smmu->dev->of_node, "qcom,msm8998-smmu-v2") &&
++ smmu->num_context_banks == 13) {
++ smmu->num_context_banks = 12;
++ } else if (of_device_is_compatible(smmu->dev->of_node, "qcom,sdm630-smmu-v2")) {
++ if (smmu->num_context_banks == 21) /* SDM630 / SDM660 A2NOC SMMU */
++ smmu->num_context_banks = 7;
++ else if (smmu->num_context_banks == 14) /* SDM630 / SDM660 LPASS SMMU */
++ smmu->num_context_banks = 13;
++ }
++
+ /*
+ * Some platforms support more than the Arm SMMU architected maximum of
+ * 128 stream matching groups. For unknown reasons, the additional
+@@ -334,6 +348,19 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
+ return 0;
+ }
+
++static int qcom_adreno_smmuv2_cfg_probe(struct arm_smmu_device *smmu)
++{
++ /* Support for 16K pages is advertised on some SoCs, but it doesn't seem to work */
++ smmu->features &= ~ARM_SMMU_FEAT_FMT_AARCH64_16K;
++
++ /* TZ protects several last context banks, hide them from Linux */
++ if (of_device_is_compatible(smmu->dev->of_node, "qcom,sdm630-smmu-v2") &&
++ smmu->num_context_banks == 5)
++ smmu->num_context_banks = 2;
++
++ return 0;
++}
++
+ static void qcom_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
+ {
+ struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
+@@ -424,6 +451,7 @@ static const struct arm_smmu_impl sdm845_smmu_500_impl = {
+
+ static const struct arm_smmu_impl qcom_adreno_smmu_v2_impl = {
+ .init_context = qcom_adreno_smmu_init_context,
++ .cfg_probe = qcom_adreno_smmuv2_cfg_probe,
+ .def_domain_type = qcom_smmu_def_domain_type,
+ .alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
+ .write_sctlr = qcom_adreno_smmu_write_sctlr,
+diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c
+index 2d22c027aa5982..e76b2293999481 100644
+--- a/drivers/iommu/iommufd/io_pagetable.c
++++ b/drivers/iommu/iommufd/io_pagetable.c
+@@ -111,6 +111,7 @@ static int iopt_alloc_iova(struct io_pagetable *iopt, unsigned long *iova,
+ unsigned long page_offset = uptr % PAGE_SIZE;
+ struct interval_tree_double_span_iter used_span;
+ struct interval_tree_span_iter allowed_span;
++ unsigned long max_alignment = PAGE_SIZE;
+ unsigned long iova_alignment;
+
+ lockdep_assert_held(&iopt->iova_rwsem);
+@@ -130,6 +131,13 @@ static int iopt_alloc_iova(struct io_pagetable *iopt, unsigned long *iova,
+ roundup_pow_of_two(length),
+ 1UL << __ffs64(uptr));
+
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++ max_alignment = HPAGE_SIZE;
++#endif
++ /* Protect against ALIGN() overflow */
++ if (iova_alignment >= max_alignment)
++ iova_alignment = max_alignment;
++
+ if (iova_alignment < iopt->iova_alignment)
+ return -EINVAL;
+
+diff --git a/drivers/leds/leds-bd2606mvv.c b/drivers/leds/leds-bd2606mvv.c
+index 3fda712d2f8095..c1181a35d0f762 100644
+--- a/drivers/leds/leds-bd2606mvv.c
++++ b/drivers/leds/leds-bd2606mvv.c
+@@ -69,16 +69,14 @@ static const struct regmap_config bd2606mvv_regmap = {
+
+ static int bd2606mvv_probe(struct i2c_client *client)
+ {
+- struct fwnode_handle *np, *child;
+ struct device *dev = &client->dev;
+ struct bd2606mvv_priv *priv;
+ struct fwnode_handle *led_fwnodes[BD2606_MAX_LEDS] = { 0 };
+ int active_pairs[BD2606_MAX_LEDS / 2] = { 0 };
+ int err, reg;
+- int i;
++ int i, j;
+
+- np = dev_fwnode(dev);
+- if (!np)
++ if (!dev_fwnode(dev))
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+@@ -94,20 +92,18 @@ static int bd2606mvv_probe(struct i2c_client *client)
+
+ i2c_set_clientdata(client, priv);
+
+- fwnode_for_each_available_child_node(np, child) {
++ device_for_each_child_node_scoped(dev, child) {
+ struct bd2606mvv_led *led;
+
+ err = fwnode_property_read_u32(child, "reg", &reg);
+- if (err) {
+- fwnode_handle_put(child);
++ if (err)
+ return err;
+- }
+- if (reg < 0 || reg >= BD2606_MAX_LEDS || led_fwnodes[reg]) {
+- fwnode_handle_put(child);
++
++ if (reg < 0 || reg >= BD2606_MAX_LEDS || led_fwnodes[reg])
+ return -EINVAL;
+- }
++
+ led = &priv->leds[reg];
+- led_fwnodes[reg] = child;
++ led_fwnodes[reg] = fwnode_handle_get(child);
+ active_pairs[reg / 2]++;
+ led->priv = priv;
+ led->led_no = reg;
+@@ -130,7 +126,8 @@ static int bd2606mvv_probe(struct i2c_client *client)
+ &priv->leds[i].ldev,
+ &init_data);
+ if (err < 0) {
+- fwnode_handle_put(child);
++ for (j = i; j < BD2606_MAX_LEDS; j++)
++ fwnode_handle_put(led_fwnodes[j]);
+ return dev_err_probe(dev, err,
+ "couldn't register LED %s\n",
+ priv->leds[i].ldev.name);
+diff --git a/drivers/leds/leds-pca995x.c b/drivers/leds/leds-pca995x.c
+index 78215dff14997c..11c7bb69573e8c 100644
+--- a/drivers/leds/leds-pca995x.c
++++ b/drivers/leds/leds-pca995x.c
+@@ -19,10 +19,6 @@
+ #define PCA995X_MODE1 0x00
+ #define PCA995X_MODE2 0x01
+ #define PCA995X_LEDOUT0 0x02
+-#define PCA9955B_PWM0 0x08
+-#define PCA9952_PWM0 0x0A
+-#define PCA9952_IREFALL 0x43
+-#define PCA9955B_IREFALL 0x45
+
+ /* Auto-increment disabled. Normal mode */
+ #define PCA995X_MODE1_CFG 0x00
+@@ -34,17 +30,38 @@
+ #define PCA995X_LDRX_MASK 0x3
+ #define PCA995X_LDRX_BITS 2
+
+-#define PCA995X_MAX_OUTPUTS 16
++#define PCA995X_MAX_OUTPUTS 24
+ #define PCA995X_OUTPUTS_PER_REG 4
+
+ #define PCA995X_IREFALL_FULL_CFG 0xFF
+ #define PCA995X_IREFALL_HALF_CFG (PCA995X_IREFALL_FULL_CFG / 2)
+
+-#define PCA995X_TYPE_NON_B 0
+-#define PCA995X_TYPE_B 1
+-
+ #define ldev_to_led(c) container_of(c, struct pca995x_led, ldev)
+
++struct pca995x_chipdef {
++ unsigned int num_leds;
++ u8 pwm_base;
++ u8 irefall;
++};
++
++static const struct pca995x_chipdef pca9952_chipdef = {
++ .num_leds = 16,
++ .pwm_base = 0x0a,
++ .irefall = 0x43,
++};
++
++static const struct pca995x_chipdef pca9955b_chipdef = {
++ .num_leds = 16,
++ .pwm_base = 0x08,
++ .irefall = 0x45,
++};
++
++static const struct pca995x_chipdef pca9956b_chipdef = {
++ .num_leds = 24,
++ .pwm_base = 0x0a,
++ .irefall = 0x40,
++};
++
+ struct pca995x_led {
+ unsigned int led_no;
+ struct led_classdev ldev;
+@@ -54,7 +71,7 @@ struct pca995x_led {
+ struct pca995x_chip {
+ struct regmap *regmap;
+ struct pca995x_led leds[PCA995X_MAX_OUTPUTS];
+- int btype;
++ const struct pca995x_chipdef *chipdef;
+ };
+
+ static int pca995x_brightness_set(struct led_classdev *led_cdev,
+@@ -62,10 +79,11 @@ static int pca995x_brightness_set(struct led_classdev *led_cdev,
+ {
+ struct pca995x_led *led = ldev_to_led(led_cdev);
+ struct pca995x_chip *chip = led->chip;
++ const struct pca995x_chipdef *chipdef = chip->chipdef;
+ u8 ledout_addr, pwmout_addr;
+ int shift, ret;
+
+- pwmout_addr = (chip->btype ? PCA9955B_PWM0 : PCA9952_PWM0) + led->led_no;
++ pwmout_addr = chipdef->pwm_base + led->led_no;
+ ledout_addr = PCA995X_LEDOUT0 + (led->led_no / PCA995X_OUTPUTS_PER_REG);
+ shift = PCA995X_LDRX_BITS * (led->led_no % PCA995X_OUTPUTS_PER_REG);
+
+@@ -102,43 +120,38 @@ static const struct regmap_config pca995x_regmap = {
+ static int pca995x_probe(struct i2c_client *client)
+ {
+ struct fwnode_handle *led_fwnodes[PCA995X_MAX_OUTPUTS] = { 0 };
+- struct fwnode_handle *np, *child;
+ struct device *dev = &client->dev;
++ const struct pca995x_chipdef *chipdef;
+ struct pca995x_chip *chip;
+ struct pca995x_led *led;
+- int i, btype, reg, ret;
++ int i, j, reg, ret;
+
+- btype = (unsigned long)device_get_match_data(&client->dev);
++ chipdef = device_get_match_data(&client->dev);
+
+- np = dev_fwnode(dev);
+- if (!np)
++ if (!dev_fwnode(dev))
+ return -ENODEV;
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+- chip->btype = btype;
++ chip->chipdef = chipdef;
+ chip->regmap = devm_regmap_init_i2c(client, &pca995x_regmap);
+ if (IS_ERR(chip->regmap))
+ return PTR_ERR(chip->regmap);
+
+ i2c_set_clientdata(client, chip);
+
+- fwnode_for_each_available_child_node(np, child) {
++ device_for_each_child_node_scoped(dev, child) {
+ ret = fwnode_property_read_u32(child, "reg", &reg);
+- if (ret) {
+- fwnode_handle_put(child);
++ if (ret)
+ return ret;
+- }
+
+- if (reg < 0 || reg >= PCA995X_MAX_OUTPUTS || led_fwnodes[reg]) {
+- fwnode_handle_put(child);
++ if (reg < 0 || reg >= PCA995X_MAX_OUTPUTS || led_fwnodes[reg])
+ return -EINVAL;
+- }
+
+ led = &chip->leds[reg];
+- led_fwnodes[reg] = child;
++ led_fwnodes[reg] = fwnode_handle_get(child);
+ led->chip = chip;
+ led->led_no = reg;
+ led->ldev.brightness_set_blocking = pca995x_brightness_set;
+@@ -157,7 +170,8 @@ static int pca995x_probe(struct i2c_client *client)
+ &chip->leds[i].ldev,
+ &init_data);
+ if (ret < 0) {
+- fwnode_handle_put(child);
++ for (j = i; j < PCA995X_MAX_OUTPUTS; j++)
++ fwnode_handle_put(led_fwnodes[j]);
+ return dev_err_probe(dev, ret,
+ "Could not register LED %s\n",
+ chip->leds[i].ldev.name);
+@@ -170,21 +184,21 @@ static int pca995x_probe(struct i2c_client *client)
+ return ret;
+
+ /* IREF Output current value for all LEDn outputs */
+- return regmap_write(chip->regmap,
+- btype ? PCA9955B_IREFALL : PCA9952_IREFALL,
+- PCA995X_IREFALL_HALF_CFG);
++ return regmap_write(chip->regmap, chipdef->irefall, PCA995X_IREFALL_HALF_CFG);
+ }
+
+ static const struct i2c_device_id pca995x_id[] = {
+- { "pca9952", .driver_data = (kernel_ulong_t)PCA995X_TYPE_NON_B },
+- { "pca9955b", .driver_data = (kernel_ulong_t)PCA995X_TYPE_B },
++ { "pca9952", .driver_data = (kernel_ulong_t)&pca9952_chipdef },
++ { "pca9955b", .driver_data = (kernel_ulong_t)&pca9955b_chipdef },
++ { "pca9956b", .driver_data = (kernel_ulong_t)&pca9956b_chipdef },
+ {}
+ };
+ MODULE_DEVICE_TABLE(i2c, pca995x_id);
+
+ static const struct of_device_id pca995x_of_match[] = {
+- { .compatible = "nxp,pca9952", .data = (void *)PCA995X_TYPE_NON_B },
+- { .compatible = "nxp,pca9955b", .data = (void *)PCA995X_TYPE_B },
++ { .compatible = "nxp,pca9952", .data = &pca9952_chipdef },
++ { .compatible = "nxp,pca9955b", .data = &pca9955b_chipdef },
++ { .compatible = "nxp,pca9956b", .data = &pca9956b_chipdef },
+ {},
+ };
+ MODULE_DEVICE_TABLE(of, pca995x_of_match);
+diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
+index f7e9a3632eb3d9..499f8cc8a39fbf 100644
+--- a/drivers/md/dm-rq.c
++++ b/drivers/md/dm-rq.c
+@@ -496,8 +496,10 @@ static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
+
+ map = dm_get_live_table(md, &srcu_idx);
+ if (unlikely(!map)) {
++ DMERR_LIMIT("%s: mapping table unavailable, erroring io",
++ dm_device_name(md));
+ dm_put_live_table(md, srcu_idx);
+- return BLK_STS_RESOURCE;
++ return BLK_STS_IOERR;
+ }
+ ti = dm_table_find_target(map, 0);
+ dm_put_live_table(md, srcu_idx);
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 8ec0a263744a50..5dd0a42463a2b8 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1817,10 +1817,15 @@ static void dm_submit_bio(struct bio *bio)
+ struct dm_table *map;
+
+ map = dm_get_live_table(md, &srcu_idx);
++ if (unlikely(!map)) {
++ DMERR_LIMIT("%s: mapping table unavailable, erroring io",
++ dm_device_name(md));
++ bio_io_error(bio);
++ goto out;
++ }
+
+- /* If suspended, or map not yet available, queue this IO for later */
+- if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
+- unlikely(!map)) {
++ /* If suspended, queue this IO for later */
++ if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
+ if (bio->bi_opf & REQ_NOWAIT)
+ bio_wouldblock_error(bio);
+ else if (bio->bi_opf & REQ_RAHEAD)
+diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c
+index 35c969fd2cb5e6..4e59734ec53e2d 100644
+--- a/drivers/media/dvb-frontends/rtl2830.c
++++ b/drivers/media/dvb-frontends/rtl2830.c
+@@ -609,7 +609,7 @@ static int rtl2830_pid_filter(struct dvb_frontend *fe, u8 index, u16 pid, int on
+ index, pid, onoff);
+
+ /* skip invalid PIDs (0x2000) */
+- if (pid > 0x1fff || index > 32)
++ if (pid > 0x1fff || index >= 32)
+ return 0;
+
+ if (onoff)
+diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
+index 601cf45c39358c..e6a7877a985413 100644
+--- a/drivers/media/dvb-frontends/rtl2832.c
++++ b/drivers/media/dvb-frontends/rtl2832.c
+@@ -983,7 +983,7 @@ static int rtl2832_pid_filter(struct dvb_frontend *fe, u8 index, u16 pid,
+ index, pid, onoff, dev->slave_ts);
+
+ /* skip invalid PIDs (0x2000) */
+- if (pid > 0x1fff || index > 32)
++ if (pid > 0x1fff || index >= 32)
+ return 0;
+
+ if (onoff)
+diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c
+index 5600f1df653d2f..b55fbd6a8a669c 100644
+--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c
++++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c
+@@ -347,11 +347,16 @@ static int vdec_h264_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ return vpu_dec_reset(vpu);
+
+ fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx);
++ if (!fb) {
++ mtk_vdec_err(inst->ctx, "fb buffer is NULL");
++ return -ENOMEM;
++ }
++
+ src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer);
+ dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer);
+
+- y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
+- c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
++ y_fb_dma = fb->base_y.dma_addr;
++ c_fb_dma = fb->base_c.dma_addr;
+
+ mtk_vdec_debug(inst->ctx, "+ [%d] FB y_dma=%llx c_dma=%llx va=%p",
+ inst->num_nalu, y_fb_dma, c_fb_dma, fb);
+diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c
+index 0e741e0dc8bacd..bb0ad93c6b789f 100644
+--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c
++++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c
+@@ -724,11 +724,16 @@ static int vdec_h264_slice_single_decode(void *h_vdec, struct mtk_vcodec_mem *bs
+ return vpu_dec_reset(vpu);
+
+ fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx);
++ if (!fb) {
++ mtk_vdec_err(inst->ctx, "fb buffer is NULL");
++ return -ENOMEM;
++ }
++
+ src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer);
+ dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer);
+
+- y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
+- c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
++ y_fb_dma = fb->base_y.dma_addr;
++ c_fb_dma = fb->base_c.dma_addr;
+ mtk_vdec_debug(inst->ctx, "[h264-dec] [%d] y_dma=%llx c_dma=%llx",
+ inst->ctx->decoded_frame_cnt, y_fb_dma, c_fb_dma);
+
+diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c
+index f64b21c0716967..86b93055f16337 100644
+--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c
++++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c
+@@ -335,14 +335,18 @@ static int vdec_vp8_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer);
+
+ fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx);
+- dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer);
++ if (!fb) {
++ mtk_vdec_err(inst->ctx, "fb buffer is NULL");
++ return -ENOMEM;
++ }
+
+- y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
++ dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer);
++ y_fb_dma = fb->base_y.dma_addr;
+ if (inst->ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 1)
+ c_fb_dma = y_fb_dma +
+ inst->ctx->picinfo.buf_w * inst->ctx->picinfo.buf_h;
+ else
+- c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
++ c_fb_dma = fb->base_c.dma_addr;
+
+ inst->vsi->dec.bs_dma = (u64)bs->dma_addr;
+ inst->vsi->dec.bs_sz = bs->size;
+diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
+index ad2bd71037abdb..246eec259c5d7c 100644
+--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
++++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
+@@ -854,6 +854,7 @@ static const struct of_device_id rzg2l_csi2_of_table[] = {
+ { .compatible = "renesas,rzg2l-csi2", },
+ { /* sentinel */ }
+ };
++MODULE_DEVICE_TABLE(of, rzg2l_csi2_of_table);
+
+ static struct platform_driver rzg2l_csi2_pdrv = {
+ .remove_new = rzg2l_csi2_remove,
+diff --git a/drivers/media/tuners/tuner-i2c.h b/drivers/media/tuners/tuner-i2c.h
+index 07aeead0644a31..724952e001cd13 100644
+--- a/drivers/media/tuners/tuner-i2c.h
++++ b/drivers/media/tuners/tuner-i2c.h
+@@ -133,10 +133,8 @@ static inline int tuner_i2c_xfer_send_recv(struct tuner_i2c_props *props,
+ } \
+ if (0 == __ret) { \
+ state = kzalloc(sizeof(type), GFP_KERNEL); \
+- if (!state) { \
+- __ret = -ENOMEM; \
++ if (NULL == state) \
+ goto __fail; \
+- } \
+ state->i2c_props.addr = i2caddr; \
+ state->i2c_props.adap = i2cadap; \
+ state->i2c_props.name = devname; \
+diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c
+index 36e060386e59df..59e1b3a4406ed1 100644
+--- a/drivers/mtd/devices/powernv_flash.c
++++ b/drivers/mtd/devices/powernv_flash.c
+@@ -207,6 +207,9 @@ static int powernv_flash_set_driver_info(struct device *dev,
+ * get them
+ */
+ mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
++ if (!mtd->name)
++ return -ENOMEM;
++
+ mtd->type = MTD_NORFLASH;
+ mtd->flags = MTD_WRITEABLE;
+ mtd->size = size;
+diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
+index 28131a127d065e..8297b366a06699 100644
+--- a/drivers/mtd/devices/slram.c
++++ b/drivers/mtd/devices/slram.c
+@@ -296,10 +296,12 @@ static int __init init_slram(void)
+ T("slram: devname = %s\n", devname);
+ if ((!map) || (!(devstart = strsep(&map, ",")))) {
+ E("slram: No devicestart specified.\n");
++ break;
+ }
+ T("slram: devstart = %s\n", devstart);
+ if ((!map) || (!(devlength = strsep(&map, ",")))) {
+ E("slram: No devicelength / -end specified.\n");
++ break;
+ }
+ T("slram: devlength = %s\n", devlength);
+ if (parse_cmdline(devname, devstart, devlength) != 0) {
+diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
+index 29c8bddde67ff6..161a409ca4ed21 100644
+--- a/drivers/mtd/nand/raw/mtk_nand.c
++++ b/drivers/mtd/nand/raw/mtk_nand.c
+@@ -1429,16 +1429,32 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
+ return 0;
+ }
+
++static void mtk_nfc_nand_chips_cleanup(struct mtk_nfc *nfc)
++{
++ struct mtk_nfc_nand_chip *mtk_chip;
++ struct nand_chip *chip;
++ int ret;
++
++ while (!list_empty(&nfc->chips)) {
++ mtk_chip = list_first_entry(&nfc->chips,
++ struct mtk_nfc_nand_chip, node);
++ chip = &mtk_chip->nand;
++ ret = mtd_device_unregister(nand_to_mtd(chip));
++ WARN_ON(ret);
++ nand_cleanup(chip);
++ list_del(&mtk_chip->node);
++ }
++}
++
+ static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc)
+ {
+ struct device_node *np = dev->of_node;
+- struct device_node *nand_np;
+ int ret;
+
+- for_each_child_of_node(np, nand_np) {
++ for_each_child_of_node_scoped(np, nand_np) {
+ ret = mtk_nfc_nand_chip_init(dev, nfc, nand_np);
+ if (ret) {
+- of_node_put(nand_np);
++ mtk_nfc_nand_chips_cleanup(nfc);
+ return ret;
+ }
+ }
+@@ -1570,20 +1586,8 @@ static int mtk_nfc_probe(struct platform_device *pdev)
+ static void mtk_nfc_remove(struct platform_device *pdev)
+ {
+ struct mtk_nfc *nfc = platform_get_drvdata(pdev);
+- struct mtk_nfc_nand_chip *mtk_chip;
+- struct nand_chip *chip;
+- int ret;
+-
+- while (!list_empty(&nfc->chips)) {
+- mtk_chip = list_first_entry(&nfc->chips,
+- struct mtk_nfc_nand_chip, node);
+- chip = &mtk_chip->nand;
+- ret = mtd_device_unregister(nand_to_mtd(chip));
+- WARN_ON(ret);
+- nand_cleanup(chip);
+- list_del(&mtk_chip->node);
+- }
+
++ mtk_nfc_nand_chips_cleanup(nfc);
+ mtk_ecc_release(nfc->ecc);
+ }
+
+diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
+index 277493e41b0723..54767154de265c 100644
+--- a/drivers/net/bareudp.c
++++ b/drivers/net/bareudp.c
+@@ -67,6 +67,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ __be16 proto;
+ void *oiph;
+ int err;
++ int nh;
+
+ bareudp = rcu_dereference_sk_user_data(sk);
+ if (!bareudp)
+@@ -144,10 +145,25 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ }
+ skb_dst_set(skb, &tun_dst->dst);
+ skb->dev = bareudp->dev;
+- oiph = skb_network_header(skb);
+- skb_reset_network_header(skb);
+ skb_reset_mac_header(skb);
+
++ /* Save offset of outer header relative to skb->head,
++ * because we are going to reset the network header to the inner header
++ * and might change skb->head.
++ */
++ nh = skb_network_header(skb) - skb->head;
++
++ skb_reset_network_header(skb);
++
++ if (!pskb_inet_may_pull(skb)) {
++ DEV_STATS_INC(bareudp->dev, rx_length_errors);
++ DEV_STATS_INC(bareudp->dev, rx_errors);
++ goto drop;
++ }
++
++ /* Get the outer header. */
++ oiph = skb->head + nh;
++
+ if (!ipv6_mod_enabled() || family == AF_INET)
+ err = IP_ECN_decapsulate(oiph, skb);
+ else
+@@ -303,6 +319,9 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ __be32 saddr;
+ int err;
+
++ if (!skb_vlan_inet_prepare(skb, skb->protocol != htons(ETH_P_TEB)))
++ return -EINVAL;
++
+ if (!sock)
+ return -ESHUTDOWN;
+
+@@ -366,6 +385,9 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ __be16 sport;
+ int err;
+
++ if (!skb_vlan_inet_prepare(skb, skb->protocol != htons(ETH_P_TEB)))
++ return -EINVAL;
++
+ if (!sock)
+ return -ESHUTDOWN;
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 53a7b53618d942..14b4780b73c724 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -5534,9 +5534,9 @@ bond_xdp_get_xmit_slave(struct net_device *bond_dev, struct xdp_buff *xdp)
+ break;
+
+ default:
+- /* Should never happen. Mode guarded by bond_xdp_check() */
+- netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n", BOND_MODE(bond));
+- WARN_ON_ONCE(1);
++ if (net_ratelimit())
++ netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n",
++ BOND_MODE(bond));
+ return NULL;
+ }
+
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index fb77fd74de27fb..97666a7595959d 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -1599,11 +1599,7 @@ static int m_can_close(struct net_device *dev)
+
+ netif_stop_queue(dev);
+
+- if (!cdev->is_peripheral)
+- napi_disable(&cdev->napi);
+-
+ m_can_stop(dev);
+- m_can_clk_stop(cdev);
+ free_irq(dev->irq, dev);
+
+ if (cdev->is_peripheral) {
+@@ -1611,10 +1607,13 @@ static int m_can_close(struct net_device *dev)
+ destroy_workqueue(cdev->tx_wq);
+ cdev->tx_wq = NULL;
+ can_rx_offload_disable(&cdev->offload);
++ } else {
++ napi_disable(&cdev->napi);
+ }
+
+ close_candev(dev);
+
++ m_can_clk_stop(cdev);
+ phy_power_off(cdev->transceiver);
+
+ return 0;
+@@ -1842,6 +1841,8 @@ static int m_can_open(struct net_device *dev)
+
+ if (cdev->is_peripheral)
+ can_rx_offload_enable(&cdev->offload);
++ else
++ napi_enable(&cdev->napi);
+
+ /* register interrupt handler */
+ if (cdev->is_peripheral) {
+@@ -1873,9 +1874,6 @@ static int m_can_open(struct net_device *dev)
+ if (err)
+ goto exit_start_fail;
+
+- if (!cdev->is_peripheral)
+- napi_enable(&cdev->napi);
+-
+ netif_start_queue(dev);
+
+ return 0;
+@@ -1889,6 +1887,8 @@ static int m_can_open(struct net_device *dev)
+ out_wq_fail:
+ if (cdev->is_peripheral)
+ can_rx_offload_disable(&cdev->offload);
++ else
++ napi_disable(&cdev->napi);
+ close_candev(dev);
+ exit_disable_clks:
+ m_can_clk_stop(cdev);
+diff --git a/drivers/net/can/usb/esd_usb.c b/drivers/net/can/usb/esd_usb.c
+index 41a0e4261d15e9..03ad10b01867d8 100644
+--- a/drivers/net/can/usb/esd_usb.c
++++ b/drivers/net/can/usb/esd_usb.c
+@@ -3,7 +3,7 @@
+ * CAN driver for esd electronics gmbh CAN-USB/2, CAN-USB/3 and CAN-USB/Micro
+ *
+ * Copyright (C) 2010-2012 esd electronic system design gmbh, Matthias Fuchs
+- * Copyright (C) 2022-2023 esd electronics gmbh, Frank Jungclaus
++ * Copyright (C) 2022-2024 esd electronics gmbh, Frank Jungclaus
+ */
+
+ #include
+@@ -1116,9 +1116,6 @@ static int esd_usb_3_set_bittiming(struct net_device *netdev)
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ flags |= ESD_USB_3_BAUDRATE_FLAG_LOM;
+
+- if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+- flags |= ESD_USB_3_BAUDRATE_FLAG_TRS;
+-
+ baud_x->nom.brp = cpu_to_le16(nom_bt->brp & (nom_btc->brp_max - 1));
+ baud_x->nom.sjw = cpu_to_le16(nom_bt->sjw & (nom_btc->sjw_max - 1));
+ baud_x->nom.tseg1 = cpu_to_le16((nom_bt->prop_seg + nom_bt->phase_seg1)
+@@ -1219,7 +1216,6 @@ static int esd_usb_probe_one_net(struct usb_interface *intf, int index)
+ switch (le16_to_cpu(dev->udev->descriptor.idProduct)) {
+ case ESD_USB_CANUSB3_PRODUCT_ID:
+ priv->can.clock.freq = ESD_USB_3_CAN_CLOCK;
+- priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
+ priv->can.bittiming_const = &esd_usb_3_nom_bittiming_const;
+ priv->can.data_bittiming_const = &esd_usb_3_data_bittiming_const;
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
+index 0f5a4ec505ddbf..18e3a9cd4fc017 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
+@@ -2305,12 +2305,11 @@ static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
+
+ snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
+ priv->ndev->name, i);
+- err = request_irq(irq, enetc_msix, 0, v->name, v);
++ err = request_irq(irq, enetc_msix, IRQF_NO_AUTOEN, v->name, v);
+ if (err) {
+ dev_err(priv->dev, "request_irq() failed!\n");
+ goto irq_err;
+ }
+- disable_irq(irq);
+
+ v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
+ v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
+diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
+index b50f16786c246a..6ab89f4782857d 100644
+--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
++++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
+@@ -1060,6 +1060,7 @@ static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp,
+ phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000);
+ rtl8168g_enable_gphy_10m(phydev);
+
++ rtl8168g_disable_aldps(phydev);
+ rtl8125a_config_eee_phy(phydev);
+ }
+
+@@ -1099,6 +1100,7 @@ static void rtl8125b_hw_phy_config(struct rtl8169_private *tp,
+ phy_modify_paged(phydev, 0xbf8, 0x12, 0xe000, 0xa000);
+
+ rtl8125_legacy_force_mode(phydev);
++ rtl8168g_disable_aldps(phydev);
+ rtl8125b_config_eee_phy(phydev);
+ }
+
+diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
+index c672f92d65e976..9319a2675e7b65 100644
+--- a/drivers/net/ethernet/seeq/ether3.c
++++ b/drivers/net/ethernet/seeq/ether3.c
+@@ -847,9 +847,11 @@ static void ether3_remove(struct expansion_card *ec)
+ {
+ struct net_device *dev = ecard_get_drvdata(ec);
+
++ ether3_outw(priv(dev)->regs.config2 |= CFG2_CTRLO, REG_CONFIG2);
+ ecard_set_drvdata(ec, NULL);
+
+ unregister_netdev(dev);
++ del_timer_sync(&priv(dev)->timer);
+ free_netdev(dev);
+ ecard_release_resources(ec);
+ }
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+index 9e40c28d453ab1..ee3604f58def52 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+@@ -35,6 +35,9 @@ static int loongson_default_data(struct plat_stmmacenet_data *plat)
+ /* Disable RX queues routing by default */
+ plat->rx_queues_cfg[0].pkt_route = 0x0;
+
++ plat->clk_ref_rate = 125000000;
++ plat->clk_ptp_rate = 125000000;
++
+ /* Default to phy auto-detection */
+ plat->phy_addr = -1;
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index d6167a7b19f21b..89a80e3e8bb88f 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -2018,7 +2018,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
+ rx_q->queue_index = queue;
+ rx_q->priv_data = priv;
+
+- pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
++ pp_params.flags = PP_FLAG_DMA_MAP | (xdp_prog ? PP_FLAG_DMA_SYNC_DEV : 0);
+ pp_params.pool_size = dma_conf->dma_rx_size;
+ num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
+ pp_params.order = ilog2(num_pages);
+diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+index bba44ff0e2872e..c37500aa063791 100644
+--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
++++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+@@ -1585,7 +1585,7 @@ static void wx_set_num_queues(struct wx *wx)
+ */
+ static int wx_acquire_msix_vectors(struct wx *wx)
+ {
+- struct irq_affinity affd = { .pre_vectors = 1 };
++ struct irq_affinity affd = {0, };
+ int nvecs, i;
+
+ nvecs = min_t(int, num_online_cpus(), wx->mac.max_msix_vectors);
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index 65d7aaad43fe90..62c10eb4f0adf1 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -652,15 +652,15 @@ static int axienet_device_reset(struct net_device *ndev)
+ *
+ * Would either be called after a successful transmit operation, or after
+ * there was an error when setting up the chain.
+- * Returns the number of descriptors handled.
++ * Returns the number of packets handled.
+ */
+ static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
+ int nr_bds, bool force, u32 *sizep, int budget)
+ {
+ struct axidma_bd *cur_p;
+ unsigned int status;
++ int i, packets = 0;
+ dma_addr_t phys;
+- int i;
+
+ for (i = 0; i < nr_bds; i++) {
+ cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
+@@ -679,8 +679,10 @@ static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
+ (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
+ DMA_TO_DEVICE);
+
+- if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
++ if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
+ napi_consume_skb(cur_p->skb, budget);
++ packets++;
++ }
+
+ cur_p->app0 = 0;
+ cur_p->app1 = 0;
+@@ -696,7 +698,13 @@ static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
+ *sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
+ }
+
+- return i;
++ if (!force) {
++ lp->tx_bd_ci += i;
++ if (lp->tx_bd_ci >= lp->tx_bd_num)
++ lp->tx_bd_ci %= lp->tx_bd_num;
++ }
++
++ return packets;
+ }
+
+ /**
+@@ -747,13 +755,10 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget)
+ u32 size = 0;
+ int packets;
+
+- packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget);
++ packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false,
++ &size, budget);
+
+ if (packets) {
+- lp->tx_bd_ci += packets;
+- if (lp->tx_bd_ci >= lp->tx_bd_num)
+- lp->tx_bd_ci %= lp->tx_bd_num;
+-
+ u64_stats_update_begin(&lp->tx_stat_sync);
+ u64_stats_add(&lp->tx_packets, packets);
+ u64_stats_add(&lp->tx_bytes, size);
+@@ -1042,9 +1047,10 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
+ u32 cr = lp->tx_dma_cr;
+
+ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
+- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+-
+- napi_schedule(&lp->napi_tx);
++ if (napi_schedule_prep(&lp->napi_tx)) {
++ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
++ __napi_schedule(&lp->napi_tx);
++ }
+ }
+
+ return IRQ_HANDLED;
+@@ -1086,9 +1092,10 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
+ u32 cr = lp->rx_dma_cr;
+
+ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
+- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+-
+- napi_schedule(&lp->napi_rx);
++ if (napi_schedule_prep(&lp->napi_rx)) {
++ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
++ __napi_schedule(&lp->napi_rx);
++ }
+ }
+
+ return IRQ_HANDLED;
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index 6cc1b56ddde2fc..60c58dd6d25311 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -464,10 +464,15 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
+ void usbnet_defer_kevent (struct usbnet *dev, int work)
+ {
+ set_bit (work, &dev->flags);
+- if (!schedule_work (&dev->kevent))
+- netdev_dbg(dev->net, "kevent %s may have been dropped\n", usbnet_event_names[work]);
+- else
+- netdev_dbg(dev->net, "kevent %s scheduled\n", usbnet_event_names[work]);
++ if (!usbnet_going_away(dev)) {
++ if (!schedule_work(&dev->kevent))
++ netdev_dbg(dev->net,
++ "kevent %s may have been dropped\n",
++ usbnet_event_names[work]);
++ else
++ netdev_dbg(dev->net,
++ "kevent %s scheduled\n", usbnet_event_names[work]);
++ }
+ }
+ EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
+
+@@ -535,7 +540,8 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
+ tasklet_schedule (&dev->bh);
+ break;
+ case 0:
+- __usbnet_queue_skb(&dev->rxq, skb, rx_start);
++ if (!usbnet_going_away(dev))
++ __usbnet_queue_skb(&dev->rxq, skb, rx_start);
+ }
+ } else {
+ netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
+@@ -843,9 +849,18 @@ int usbnet_stop (struct net_device *net)
+
+ /* deferred work (timer, softirq, task) must also stop */
+ dev->flags = 0;
+- del_timer_sync (&dev->delay);
+- tasklet_kill (&dev->bh);
++ del_timer_sync(&dev->delay);
++ tasklet_kill(&dev->bh);
+ cancel_work_sync(&dev->kevent);
++
++ /* We have cyclic dependencies. Those calls are needed
++ * to break a cycle. We cannot fall into the gaps because
++ * we have a flag
++ */
++ tasklet_kill(&dev->bh);
++ del_timer_sync(&dev->delay);
++ cancel_work_sync(&dev->kevent);
++
+ if (!pm)
+ usb_autopm_put_interface(dev->intf);
+
+@@ -1171,7 +1186,8 @@ usbnet_deferred_kevent (struct work_struct *work)
+ status);
+ } else {
+ clear_bit (EVENT_RX_HALT, &dev->flags);
+- tasklet_schedule (&dev->bh);
++ if (!usbnet_going_away(dev))
++ tasklet_schedule(&dev->bh);
+ }
+ }
+
+@@ -1196,7 +1212,8 @@ usbnet_deferred_kevent (struct work_struct *work)
+ usb_autopm_put_interface(dev->intf);
+ fail_lowmem:
+ if (resched)
+- tasklet_schedule (&dev->bh);
++ if (!usbnet_going_away(dev))
++ tasklet_schedule(&dev->bh);
+ }
+ }
+
+@@ -1559,6 +1576,7 @@ static void usbnet_bh (struct timer_list *t)
+ } else if (netif_running (dev->net) &&
+ netif_device_present (dev->net) &&
+ netif_carrier_ok(dev->net) &&
++ !usbnet_going_away(dev) &&
+ !timer_pending(&dev->delay) &&
+ !test_bit(EVENT_RX_PAUSED, &dev->flags) &&
+ !test_bit(EVENT_RX_HALT, &dev->flags)) {
+@@ -1606,6 +1624,7 @@ void usbnet_disconnect (struct usb_interface *intf)
+ usb_set_intfdata(intf, NULL);
+ if (!dev)
+ return;
++ usbnet_mark_going_away(dev);
+
+ xdev = interface_to_usbdev (intf);
+
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index bc01f2dafa9488..2da3be3fb9423c 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -1269,6 +1269,11 @@ static struct sk_buff *receive_small(struct net_device *dev,
+ struct page *page = virt_to_head_page(buf);
+ struct sk_buff *skb;
+
++ /* We passed the address of virtnet header to virtio-core,
++ * so truncate the padding.
++ */
++ buf -= VIRTNET_RX_PAD + xdp_headroom;
++
+ len -= vi->hdr_len;
+ u64_stats_add(&stats->bytes, len);
+
+@@ -1859,8 +1864,9 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
+ if (unlikely(!buf))
+ return -ENOMEM;
+
+- virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom,
+- vi->hdr_len + GOOD_PACKET_LEN);
++ buf += VIRTNET_RX_PAD + xdp_headroom;
++
++ virtnet_rq_init_one_sg(rq, buf, vi->hdr_len + GOOD_PACKET_LEN);
+
+ err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
+ if (err < 0) {
+diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
+index dd2a7c95517be2..4bb30e40372877 100644
+--- a/drivers/net/wireless/ath/ath12k/mac.c
++++ b/drivers/net/wireless/ath/ath12k/mac.c
+@@ -1681,9 +1681,8 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
+ * request, then use MAX_AMPDU_LEN_FACTOR as 16 to calculate max_ampdu
+ * length.
+ */
+- ampdu_factor = (he_cap->he_cap_elem.mac_cap_info[3] &
+- IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK) >>
+- IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK;
++ ampdu_factor = u8_get_bits(he_cap->he_cap_elem.mac_cap_info[3],
++ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
+
+ if (ampdu_factor) {
+ if (sta->deflink.vht_cap.vht_supported)
+diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
+index 21399ad233c022..9105fdd14c6671 100644
+--- a/drivers/net/wireless/ath/ath12k/wmi.c
++++ b/drivers/net/wireless/ath/ath12k/wmi.c
+@@ -1501,6 +1501,7 @@ int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar,
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
+ sizeof(*cmd));
+ cmd->req_type = cpu_to_le32(type);
++ cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI bss chan info req type %d\n", type);
+diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
+index c75a6fa1f7e089..a19a2c29f2264a 100644
+--- a/drivers/net/wireless/ath/ath12k/wmi.h
++++ b/drivers/net/wireless/ath/ath12k/wmi.h
+@@ -3058,6 +3058,7 @@ struct wmi_pdev_bss_chan_info_req_cmd {
+ __le32 tlv_header;
+ /* ref wmi_bss_chan_info_req_type */
+ __le32 req_type;
++ __le32 pdev_id;
+ } __packed;
+
+ struct wmi_ap_ps_peer_cmd {
+@@ -4033,7 +4034,6 @@ struct wmi_vdev_stopped_event {
+ } __packed;
+
+ struct wmi_pdev_bss_chan_info_event {
+- __le32 pdev_id;
+ __le32 freq; /* Units in MHz */
+ __le32 noise_floor; /* units are dBm */
+ /* rx clear - how often the channel was unused */
+@@ -4051,6 +4051,7 @@ struct wmi_pdev_bss_chan_info_event {
+ /*rx_cycle cnt for my bss in 64bits format */
+ __le32 rx_bss_cycle_count_low;
+ __le32 rx_bss_cycle_count_high;
++ __le32 pdev_id;
+ } __packed;
+
+ #define WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS 0
+diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
+index a0376a6787b8d0..808fb6747a7f78 100644
+--- a/drivers/net/wireless/ath/ath9k/debug.c
++++ b/drivers/net/wireless/ath/ath9k/debug.c
+@@ -1380,8 +1380,6 @@ int ath9k_init_debug(struct ath_hw *ah)
+
+ sc->debug.debugfs_phy = debugfs_create_dir("ath9k",
+ sc->hw->wiphy->debugfsdir);
+- if (IS_ERR(sc->debug.debugfs_phy))
+- return -ENOMEM;
+
+ #ifdef CONFIG_ATH_DEBUG
+ debugfs_create_file("debug", 0600, sc->debug.debugfs_phy,
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+index 278ddc713fdc25..7b145282243190 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+@@ -486,8 +486,6 @@ int ath9k_htc_init_debug(struct ath_hw *ah)
+
+ priv->debug.debugfs_phy = debugfs_create_dir(KBUILD_MODNAME,
+ priv->hw->wiphy->debugfsdir);
+- if (IS_ERR(priv->debug.debugfs_phy))
+- return -ENOMEM;
+
+ ath9k_cmn_spectral_init_debug(&priv->spec_priv, priv->debug.debugfs_phy);
+
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
+index 7ea2631b80692d..00794086cc7c97 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
+@@ -123,7 +123,7 @@ static s32 brcmf_btcoex_params_read(struct brcmf_if *ifp, u32 addr, u32 *data)
+ {
+ *data = addr;
+
+- return brcmf_fil_iovar_int_get(ifp, "btc_params", data);
++ return brcmf_fil_iovar_int_query(ifp, "btc_params", data);
+ }
+
+ /**
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+index da4968e66725bc..c708ae91c3ce93 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+@@ -663,8 +663,8 @@ static int brcmf_cfg80211_request_sta_if(struct brcmf_if *ifp, u8 *macaddr)
+ /* interface_create version 3+ */
+ /* get supported version from firmware side */
+ iface_create_ver = 0;
+- err = brcmf_fil_bsscfg_int_get(ifp, "interface_create",
+- &iface_create_ver);
++ err = brcmf_fil_bsscfg_int_query(ifp, "interface_create",
++ &iface_create_ver);
+ if (err) {
+ brcmf_err("fail to get supported version, err=%d\n", err);
+ return -EOPNOTSUPP;
+@@ -756,8 +756,8 @@ static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp)
+ /* interface_create version 3+ */
+ /* get supported version from firmware side */
+ iface_create_ver = 0;
+- err = brcmf_fil_bsscfg_int_get(ifp, "interface_create",
+- &iface_create_ver);
++ err = brcmf_fil_bsscfg_int_query(ifp, "interface_create",
++ &iface_create_ver);
+ if (err) {
+ brcmf_err("fail to get supported version, err=%d\n", err);
+ return -EOPNOTSUPP;
+@@ -2101,7 +2101,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
+ if (!sme->crypto.n_akm_suites)
+ return 0;
+
+- err = brcmf_fil_bsscfg_int_get(netdev_priv(ndev), "wpa_auth", &val);
++ err = brcmf_fil_bsscfg_int_get(netdev_priv(ndev),
++ "wpa_auth", &val);
+ if (err) {
+ bphy_err(drvr, "could not get wpa_auth (%d)\n", err);
+ return err;
+@@ -2680,7 +2681,7 @@ brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_cfg80211_vif *vif = wdev_to_vif(wdev);
+ struct brcmf_pub *drvr = cfg->pub;
+- s32 qdbm = 0;
++ s32 qdbm;
+ s32 err;
+
+ brcmf_dbg(TRACE, "Enter\n");
+@@ -7046,8 +7047,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
+ ch.bw = BRCMU_CHAN_BW_20;
+ cfg->d11inf.encchspec(&ch);
+ chaninfo = ch.chspec;
+- err = brcmf_fil_bsscfg_int_get(ifp, "per_chan_info",
+- &chaninfo);
++ err = brcmf_fil_bsscfg_int_query(ifp, "per_chan_info",
++ &chaninfo);
+ if (!err) {
+ if (chaninfo & WL_CHAN_RADAR)
+ channel->flags |=
+@@ -7081,7 +7082,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
+
+ /* verify support for bw_cap command */
+ val = WLC_BAND_5G;
+- err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &val);
++ err = brcmf_fil_iovar_int_query(ifp, "bw_cap", &val);
+
+ if (!err) {
+ /* only set 2G bandwidth using bw_cap command */
+@@ -7157,11 +7158,11 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
+ int err;
+
+ band = WLC_BAND_2G;
+- err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band);
++ err = brcmf_fil_iovar_int_query(ifp, "bw_cap", &band);
+ if (!err) {
+ bw_cap[NL80211_BAND_2GHZ] = band;
+ band = WLC_BAND_5G;
+- err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band);
++ err = brcmf_fil_iovar_int_query(ifp, "bw_cap", &band);
+ if (!err) {
+ bw_cap[NL80211_BAND_5GHZ] = band;
+ return;
+@@ -7170,7 +7171,6 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
+ return;
+ }
+ brcmf_dbg(INFO, "fallback to mimo_bw_cap info\n");
+- mimo_bwcap = 0;
+ err = brcmf_fil_iovar_int_get(ifp, "mimo_bw_cap", &mimo_bwcap);
+ if (err)
+ /* assume 20MHz if firmware does not give a clue */
+@@ -7266,7 +7266,7 @@ static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg)
+ struct brcmf_pub *drvr = cfg->pub;
+ struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0);
+ struct wiphy *wiphy = cfg_to_wiphy(cfg);
+- u32 nmode = 0;
++ u32 nmode;
+ u32 vhtmode = 0;
+ u32 bw_cap[2] = { WLC_BW_20MHZ_BIT, WLC_BW_20MHZ_BIT };
+ u32 rxchain;
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
+index a9514d72f770b6..6385a7db7f7d77 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
+@@ -142,6 +142,7 @@ brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
+
+ return err;
+ }
++BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_cmd_data_set);
+
+ s32
+ brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
+@@ -160,36 +161,7 @@ brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
+
+ return err;
+ }
+-
+-
+-s32
+-brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data)
+-{
+- s32 err;
+- __le32 data_le = cpu_to_le32(data);
+-
+- mutex_lock(&ifp->drvr->proto_block);
+- brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, data);
+- err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), true);
+- mutex_unlock(&ifp->drvr->proto_block);
+-
+- return err;
+-}
+-
+-s32
+-brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
+-{
+- s32 err;
+- __le32 data_le = cpu_to_le32(*data);
+-
+- mutex_lock(&ifp->drvr->proto_block);
+- err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), false);
+- mutex_unlock(&ifp->drvr->proto_block);
+- *data = le32_to_cpu(data_le);
+- brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, *data);
+-
+- return err;
+-}
++BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_cmd_data_get);
+
+ static u32
+ brcmf_create_iovar(const char *name, const char *data, u32 datalen,
+@@ -271,26 +243,7 @@ brcmf_fil_iovar_data_get(struct brcmf_if *ifp, const char *name, void *data,
+ mutex_unlock(&drvr->proto_block);
+ return err;
+ }
+-
+-s32
+-brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data)
+-{
+- __le32 data_le = cpu_to_le32(data);
+-
+- return brcmf_fil_iovar_data_set(ifp, name, &data_le, sizeof(data_le));
+-}
+-
+-s32
+-brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
+-{
+- __le32 data_le = cpu_to_le32(*data);
+- s32 err;
+-
+- err = brcmf_fil_iovar_data_get(ifp, name, &data_le, sizeof(data_le));
+- if (err == 0)
+- *data = le32_to_cpu(data_le);
+- return err;
+-}
++BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_iovar_data_get);
+
+ static u32
+ brcmf_create_bsscfg(s32 bsscfgidx, const char *name, char *data, u32 datalen,
+@@ -365,6 +318,7 @@ brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name,
+ mutex_unlock(&drvr->proto_block);
+ return err;
+ }
++BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_bsscfg_data_set);
+
+ s32
+ brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name,
+@@ -395,28 +349,7 @@ brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name,
+ mutex_unlock(&drvr->proto_block);
+ return err;
+ }
+-
+-s32
+-brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data)
+-{
+- __le32 data_le = cpu_to_le32(data);
+-
+- return brcmf_fil_bsscfg_data_set(ifp, name, &data_le,
+- sizeof(data_le));
+-}
+-
+-s32
+-brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
+-{
+- __le32 data_le = cpu_to_le32(*data);
+- s32 err;
+-
+- err = brcmf_fil_bsscfg_data_get(ifp, name, &data_le,
+- sizeof(data_le));
+- if (err == 0)
+- *data = le32_to_cpu(data_le);
+- return err;
+-}
++BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_bsscfg_data_get);
+
+ static u32 brcmf_create_xtlv(const char *name, u16 id, char *data, u32 len,
+ char *buf, u32 buflen)
+@@ -466,6 +399,7 @@ s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, const char *name, u16 id,
+ mutex_unlock(&drvr->proto_block);
+ return err;
+ }
++BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_xtlv_data_set);
+
+ s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, const char *name, u16 id,
+ void *data, u32 len)
+@@ -495,39 +429,4 @@ s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, const char *name, u16 id,
+ mutex_unlock(&drvr->proto_block);
+ return err;
+ }
+-
+-s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, const char *name, u16 id, u32 data)
+-{
+- __le32 data_le = cpu_to_le32(data);
+-
+- return brcmf_fil_xtlv_data_set(ifp, name, id, &data_le,
+- sizeof(data_le));
+-}
+-
+-s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, const char *name, u16 id, u32 *data)
+-{
+- __le32 data_le = cpu_to_le32(*data);
+- s32 err;
+-
+- err = brcmf_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le));
+- if (err == 0)
+- *data = le32_to_cpu(data_le);
+- return err;
+-}
+-
+-s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, const char *name, u16 id, u8 *data)
+-{
+- return brcmf_fil_xtlv_data_get(ifp, name, id, data, sizeof(*data));
+-}
+-
+-s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, const char *name, u16 id, u16 *data)
+-{
+- __le16 data_le = cpu_to_le16(*data);
+- s32 err;
+-
+- err = brcmf_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le));
+- if (err == 0)
+- *data = le16_to_cpu(data_le);
+- return err;
+-}
+-
++BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_xtlv_data_get);
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
+index bc693157c4b1c8..31e080e4da6697 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
+@@ -81,29 +81,142 @@
+
+ s32 brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
+ s32 brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
+-s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data);
+-s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data);
++static inline
++s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data)
++{
++ s32 err;
++ __le32 data_le = cpu_to_le32(data);
+
+-s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name, const void *data,
+- u32 len);
++ brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, data);
++ err = brcmf_fil_cmd_data_set(ifp, cmd, &data_le, sizeof(data_le));
++
++ return err;
++}
++static inline
++s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
++{
++ s32 err;
++
++ err = brcmf_fil_cmd_data_get(ifp, cmd, data, sizeof(*data));
++ if (err == 0)
++ *data = le32_to_cpu(*(__le32 *)data);
++ brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, *data);
++
++ return err;
++}
++static inline
++s32 brcmf_fil_cmd_int_query(struct brcmf_if *ifp, u32 cmd, u32 *data)
++{
++ __le32 *data_le = (__le32 *)data;
++
++ *data_le = cpu_to_le32(*data);
++ return brcmf_fil_cmd_int_get(ifp, cmd, data);
++}
++
++s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name,
++ const void *data, u32 len);
+ s32 brcmf_fil_iovar_data_get(struct brcmf_if *ifp, const char *name, void *data,
+ u32 len);
+-s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data);
+-s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data);
+-
+-s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name, void *data,
+- u32 len);
+-s32 brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name, void *data,
+- u32 len);
+-s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data);
+-s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data);
++static inline
++s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data)
++{
++ __le32 data_le = cpu_to_le32(data);
++
++ return brcmf_fil_iovar_data_set(ifp, name, &data_le, sizeof(data_le));
++}
++static inline
++s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
++{
++ s32 err;
++
++ err = brcmf_fil_iovar_data_get(ifp, name, data, sizeof(*data));
++ if (err == 0)
++ *data = le32_to_cpu(*(__le32 *)data);
++ return err;
++}
++static inline
++s32 brcmf_fil_iovar_int_query(struct brcmf_if *ifp, const char *name, u32 *data)
++{
++ __le32 *data_le = (__le32 *)data;
++
++ *data_le = cpu_to_le32(*data);
++ return brcmf_fil_iovar_int_get(ifp, name, data);
++}
++
++
++s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name,
++ void *data, u32 len);
++s32 brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name,
++ void *data, u32 len);
++static inline
++s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data)
++{
++ __le32 data_le = cpu_to_le32(data);
++
++ return brcmf_fil_bsscfg_data_set(ifp, name, &data_le,
++ sizeof(data_le));
++}
++static inline
++s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
++{
++ s32 err;
++
++ err = brcmf_fil_bsscfg_data_get(ifp, name, data, sizeof(*data));
++ if (err == 0)
++ *data = le32_to_cpu(*(__le32 *)data);
++ return err;
++}
++static inline
++s32 brcmf_fil_bsscfg_int_query(struct brcmf_if *ifp, const char *name, u32 *data)
++{
++ __le32 *data_le = (__le32 *)data;
++
++ *data_le = cpu_to_le32(*data);
++ return brcmf_fil_bsscfg_int_get(ifp, name, data);
++}
++
+ s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, const char *name, u16 id,
+ void *data, u32 len);
+ s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, const char *name, u16 id,
+ void *data, u32 len);
+-s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, const char *name, u16 id, u32 data);
+-s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, const char *name, u16 id, u32 *data);
+-s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, const char *name, u16 id, u8 *data);
+-s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, const char *name, u16 id, u16 *data);
++static inline
++s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, const char *name, u16 id,
++ u32 data)
++{
++ __le32 data_le = cpu_to_le32(data);
++
++ return brcmf_fil_xtlv_data_set(ifp, name, id, &data_le,
++ sizeof(data_le));
++}
++static inline
++s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, const char *name, u16 id,
++ u32 *data)
++{
++ __le32 data_le = cpu_to_le32(*data);
++ s32 err;
++
++ err = brcmf_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le));
++ if (err == 0)
++ *data = le32_to_cpu(data_le);
++ return err;
++}
++static inline
++s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, const char *name, u16 id,
++ u8 *data)
++{
++ return brcmf_fil_xtlv_data_get(ifp, name, id, data, sizeof(*data));
++}
++static inline
++s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, const char *name, u16 id,
++ u16 *data)
++{
++ __le16 data_le = cpu_to_le16(*data);
++ s32 err;
++
++ err = brcmf_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le));
++ if (err == 0)
++ *data = le16_to_cpu(data_le);
++ return err;
++}
+
+ #endif /* _fwil_h_ */
+diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+index cc71b513adf987..cebd3c91756fe5 100644
+--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
++++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+@@ -152,6 +152,17 @@ const struct iwl_cfg_trans_params iwl_bz_trans_cfg = {
+ .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
+ };
+
++const struct iwl_cfg_trans_params iwl_gl_trans_cfg = {
++ .device_family = IWL_DEVICE_FAMILY_BZ,
++ .base_params = &iwl_bz_base_params,
++ .mq_rx_supported = true,
++ .rf_id = true,
++ .gen2 = true,
++ .umac_prph_offset = 0x300000,
++ .xtal_latency = 12000,
++ .low_latency_xtal = true,
++};
++
+ const char iwl_bz_name[] = "Intel(R) TBD Bz device";
+
+ const struct iwl_cfg iwl_cfg_bz = {
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+index f45f645ca64851..dd3913617bb0be 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+@@ -493,6 +493,7 @@ extern const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg;
+ extern const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg;
+ extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg;
+ extern const struct iwl_cfg_trans_params iwl_bz_trans_cfg;
++extern const struct iwl_cfg_trans_params iwl_gl_trans_cfg;
+ extern const struct iwl_cfg_trans_params iwl_sc_trans_cfg;
+ extern const char iwl9162_name[];
+ extern const char iwl9260_name[];
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+index 243eccc68cb050..f7bec6f3d75847 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+@@ -103,7 +103,7 @@
+ #define IWL_MVM_FTM_INITIATOR_SECURE_LTF false
+ #define IWL_MVM_FTM_RESP_NDP_SUPPORT true
+ #define IWL_MVM_FTM_RESP_LMR_FEEDBACK_SUPPORT true
+-#define IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR 5
++#define IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR 7
+ #define IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR 1000
+ #define IWL_MVM_D3_DEBUG false
+ #define IWL_MVM_USE_TWT true
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index dea4d6478b4f4f..4a2de79f2e864b 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -501,10 +501,38 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ {IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)},
+
+ /* Bz devices */
+- {IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)},
+- {IWL_PCI_DEVICE(0x272D, PCI_ANY_ID, iwl_bz_trans_cfg)},
+- {IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_bz_trans_cfg)},
+- {IWL_PCI_DEVICE(0xA840, PCI_ANY_ID, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_gl_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x0000, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x0090, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x0094, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x0098, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x009C, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x00C0, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x00C4, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x00E0, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x00E4, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x00E8, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x00EC, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x0100, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x0110, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x0114, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x0118, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x011C, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x0310, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x0314, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x0510, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x0A10, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x1671, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x1672, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x1771, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x1772, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x1791, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x1792, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x4090, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x40C4, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x40E0, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x4110, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x4314, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)},
+
+ /* Sc devices */
+diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
+index 85bffcf4f6fbfc..bf4541e76ba228 100644
+--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
++++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
+@@ -1503,7 +1503,7 @@ EXPORT_SYMBOL_GPL(mt76_wcid_init);
+
+ void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
+ {
+- struct mt76_phy *phy = dev->phys[wcid->phy_idx];
++ struct mt76_phy *phy = mt76_dev_phy(dev, wcid->phy_idx);
+ struct ieee80211_hw *hw;
+ struct sk_buff_head list;
+ struct sk_buff *skb;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+index b3a61b0ddd03d7..525444953df687 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+@@ -29,7 +29,7 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
+ struct ieee80211_sta *sta;
+ struct mt7603_sta *msta;
+ struct mt76_wcid *wcid;
+- u8 tid = 0, hwq = 0;
++ u8 qid, tid = 0, hwq = 0;
+ void *priv;
+ int idx;
+ u32 val;
+@@ -57,7 +57,7 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ tid = *ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_TAG1D_MASK;
+- u8 qid = tid_to_ac[tid];
++ qid = tid_to_ac[tid];
+ hwq = wmm_queue_map[qid];
+ skb_set_queue_mapping(skb, qid);
+ } else if (ieee80211_is_data(hdr->frame_control)) {
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+index 18a50ccff106a8..f22a1aa8850521 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+@@ -56,6 +56,9 @@ int mt7615_thermal_init(struct mt7615_dev *dev)
+
+ name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7615_%s",
+ wiphy_name(wiphy));
++ if (!name)
++ return -ENOMEM;
++
+ hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, dev,
+ mt7615_hwmon_groups);
+ if (IS_ERR(hwmon))
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+index 35fdf4f98d80ba..e6af7318a9e38a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+@@ -194,6 +194,8 @@ static int mt7915_thermal_init(struct mt7915_phy *phy)
+
+ name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7915_%s",
+ wiphy_name(wiphy));
++ if (!name)
++ return -ENOMEM;
+
+ cdev = thermal_cooling_device_register(name, phy, &mt7915_thermal_ops);
+ if (!IS_ERR(cdev)) {
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+index 260fe00d7dc6d8..27655dcb791427 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+@@ -557,8 +557,7 @@ static void mt7915_configure_filter(struct ieee80211_hw *hw,
+
+ MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
+ MT_WF_RFCR_DROP_RTS |
+- MT_WF_RFCR_DROP_CTL_RSV |
+- MT_WF_RFCR_DROP_NDPA);
++ MT_WF_RFCR_DROP_CTL_RSV);
+
+ *total_flags = flags;
+ mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+index ff63f37f67d9c9..61de6b03fa0beb 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+@@ -52,6 +52,8 @@ static int mt7921_thermal_init(struct mt792x_phy *phy)
+
+ name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7921_%s",
+ wiphy_name(wiphy));
++ if (!name)
++ return -ENOMEM;
+
+ hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, phy,
+ mt7921_hwmon_groups);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+index 2016ed9197fe3c..aee531cab46f64 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+@@ -561,8 +561,6 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
+ return;
+
+ elem->phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER;
+- if (vif == NL80211_IFTYPE_AP)
+- elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
+
+ c = FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
+ sts - 1) |
+@@ -570,6 +568,11 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
+ sts - 1);
+ elem->phy_cap_info[5] |= c;
+
++ if (vif != NL80211_IFTYPE_AP)
++ return;
++
++ elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
++
+ c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
+ IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB;
+ elem->phy_cap_info[6] |= c;
+@@ -729,7 +732,6 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL;
+
+ eht_cap_elem->phy_cap_info[0] =
+- IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ |
+ IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE;
+@@ -743,30 +745,36 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
+ u8_encode_bits(u8_get_bits(val, GENMASK(2, 1)),
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK) |
+ u8_encode_bits(val,
+- IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK) |
+- u8_encode_bits(val,
+- IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK);
++ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK);
+
+ eht_cap_elem->phy_cap_info[2] =
+ u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK) |
+- u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK) |
+- u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK);
++ u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK);
++
++ if (band == NL80211_BAND_6GHZ) {
++ eht_cap_elem->phy_cap_info[0] |=
++ IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
++
++ eht_cap_elem->phy_cap_info[1] |=
++ u8_encode_bits(val,
++ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK);
++
++ eht_cap_elem->phy_cap_info[2] |=
++ u8_encode_bits(sts - 1,
++ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK);
++ }
+
+ eht_cap_elem->phy_cap_info[3] =
+ IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+- IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+- IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+- IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
+- IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK;
++ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK;
+
+ eht_cap_elem->phy_cap_info[4] =
+ u8_encode_bits(min_t(int, sts - 1, 2),
+ IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK);
+
+ eht_cap_elem->phy_cap_info[5] =
+- IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
+ u8_encode_bits(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US,
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK) |
+ u8_encode_bits(u8_get_bits(0x11, GENMASK(1, 0)),
+@@ -780,14 +788,6 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
+ IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK) |
+ u8_encode_bits(val, IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK);
+
+- eht_cap_elem->phy_cap_info[7] =
+- IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
+- IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
+- IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ |
+- IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
+- IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ |
+- IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ;
+-
+ val = u8_encode_bits(nss, IEEE80211_EHT_MCS_NSS_RX) |
+ u8_encode_bits(nss, IEEE80211_EHT_MCS_NSS_TX);
+ #define SET_EHT_MAX_NSS(_bw, _val) do { \
+@@ -798,8 +798,29 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
+
+ SET_EHT_MAX_NSS(80, val);
+ SET_EHT_MAX_NSS(160, val);
+- SET_EHT_MAX_NSS(320, val);
++ if (band == NL80211_BAND_6GHZ)
++ SET_EHT_MAX_NSS(320, val);
+ #undef SET_EHT_MAX_NSS
++
++ if (iftype != NL80211_IFTYPE_AP)
++ return;
++
++ eht_cap_elem->phy_cap_info[3] |=
++ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
++ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK;
++
++ eht_cap_elem->phy_cap_info[7] =
++ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
++ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
++ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
++ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ;
++
++ if (band != NL80211_BAND_6GHZ)
++ return;
++
++ eht_cap_elem->phy_cap_info[7] |=
++ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ |
++ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ;
+ }
+
+ static void
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+index 7fea9f0d409bf5..0e69f0a508616b 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+@@ -190,7 +190,7 @@ static int mt7996_add_interface(struct ieee80211_hw *hw,
+ mvif->mt76.omac_idx = idx;
+ mvif->phy = phy;
+ mvif->mt76.band_idx = band_idx;
+- mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP;
++ mvif->mt76.wmm_idx = vif->type == NL80211_IFTYPE_AP ? 0 : 3;
+
+ ret = mt7996_mcu_add_dev_info(phy, vif, true);
+ if (ret)
+@@ -287,6 +287,10 @@ int mt7996_set_channel(struct mt7996_phy *phy)
+ if (ret)
+ goto out;
+
++ ret = mt7996_mcu_set_chan_info(phy, UNI_CHANNEL_RX_PATH);
++ if (ret)
++ goto out;
++
+ ret = mt7996_dfs_init_radar_detector(phy);
+ mt7996_mac_cca_stats_reset(phy);
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+index b66f712e1b17b7..302171e1035977 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+@@ -520,13 +520,10 @@ void mt7996_mcu_rx_event(struct mt7996_dev *dev, struct sk_buff *skb)
+ static struct tlv *
+ mt7996_mcu_add_uni_tlv(struct sk_buff *skb, u16 tag, u16 len)
+ {
+- struct tlv *ptlv, tlv = {
+- .tag = cpu_to_le16(tag),
+- .len = cpu_to_le16(len),
+- };
++ struct tlv *ptlv = skb_put_zero(skb, len);
+
+- ptlv = skb_put(skb, len);
+- memcpy(ptlv, &tlv, sizeof(tlv));
++ ptlv->tag = cpu_to_le16(tag);
++ ptlv->len = cpu_to_le16(len);
+
+ return ptlv;
+ }
+@@ -1188,10 +1185,10 @@ mt7996_is_ebf_supported(struct mt7996_phy *phy, struct ieee80211_vif *vif,
+
+ if (bfee)
+ return vif->bss_conf.eht_su_beamformee &&
+- EHT_PHY(CAP0_SU_BEAMFORMEE, pe->phy_cap_info[0]);
++ EHT_PHY(CAP0_SU_BEAMFORMER, pe->phy_cap_info[0]);
+ else
+ return vif->bss_conf.eht_su_beamformer &&
+- EHT_PHY(CAP0_SU_BEAMFORMER, pe->phy_cap_info[0]);
++ EHT_PHY(CAP0_SU_BEAMFORMEE, pe->phy_cap_info[0]);
+ }
+
+ if (sta->deflink.he_cap.has_he) {
+@@ -1303,6 +1300,9 @@ mt7996_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
+ u8 nss_mcs = mt7996_mcu_get_sta_nss(mcs_map);
+ u8 snd_dim, sts;
+
++ if (!vc)
++ return;
++
+ bf->tx_mode = MT_PHY_TYPE_HE_SU;
+
+ mt7996_mcu_sta_sounding_rate(bf);
+@@ -1412,7 +1412,7 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
+ {
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_phy *phy = mvif->phy;
+- int tx_ant = hweight8(phy->mt76->chainmask) - 1;
++ int tx_ant = hweight16(phy->mt76->chainmask) - 1;
+ struct sta_rec_bf *bf;
+ struct tlv *tlv;
+ const u8 matrix[4][4] = {
+@@ -2072,7 +2072,7 @@ int mt7996_mcu_add_beacon(struct ieee80211_hw *hw,
+ info = IEEE80211_SKB_CB(skb);
+ info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->mt76->band_idx);
+
+- len = sizeof(*bcn) + MT_TXD_SIZE + skb->len;
++ len = ALIGN(sizeof(*bcn) + MT_TXD_SIZE + skb->len, 4);
+ tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_BCN_CONTENT, len);
+ bcn = (struct bss_bcn_content_tlv *)tlv;
+ bcn->enable = en;
+@@ -2141,8 +2141,7 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
+ info->band = band;
+
info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->mt76->band_idx); + +- len = sizeof(*discov) + MT_TXD_SIZE + skb->len; +- ++ len = ALIGN(sizeof(*discov) + MT_TXD_SIZE + skb->len, 4); + tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_OFFLOAD, len); + + discov = (struct bss_inband_discovery_tlv *)tlv; +diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h +index dc8d0a30c707cd..58504b80eae8b0 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h ++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h +@@ -587,10 +587,10 @@ enum { + sizeof(struct sta_rec_hdr_trans) + \ + sizeof(struct tlv)) + +-#define MT7996_MAX_BEACON_SIZE 1342 ++#define MT7996_MAX_BEACON_SIZE 1338 + #define MT7996_BEACON_UPDATE_SIZE (sizeof(struct bss_req_hdr) + \ + sizeof(struct bss_bcn_content_tlv) + \ +- MT_TXD_SIZE + \ ++ 4 + MT_TXD_SIZE + \ + sizeof(struct bss_bcn_cntdwn_tlv) + \ + sizeof(struct bss_bcn_mbss_tlv)) + #define MT7996_MAX_BSS_OFFLOAD_SIZE (MT7996_MAX_BEACON_SIZE + \ +diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c +index e4bb3ea6e22676..25e881ee727cdb 100644 +--- a/drivers/net/wireless/microchip/wilc1000/hif.c ++++ b/drivers/net/wireless/microchip/wilc1000/hif.c +@@ -381,6 +381,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss, + struct wilc_join_bss_param *param; + u8 rates_len = 0; + int ies_len; ++ u64 ies_tsf; + int ret; + + param = kzalloc(sizeof(*param), GFP_KERNEL); +@@ -396,6 +397,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss, + return NULL; + } + ies_len = ies->len; ++ ies_tsf = ies->tsf; + rcu_read_unlock(); + + param->beacon_period = cpu_to_le16(bss->beacon_interval); +@@ -451,7 +453,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss, + IEEE80211_P2P_ATTR_ABSENCE_NOTICE, + (u8 *)&noa_attr, sizeof(noa_attr)); + if (ret > 0) { +- param->tsf_lo = cpu_to_le32(ies->tsf); ++ param->tsf_lo = cpu_to_le32(ies_tsf); + param->noa_enabled = 1; + param->idx = noa_attr.index; + if (noa_attr.oppps_ctwindow & IEEE80211_P2P_OPPPS_ENABLE_BIT) { +diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c +index 86467d2f8888ca..d35f26919806a7 100644 +--- a/drivers/net/wireless/realtek/rtw88/coex.c ++++ b/drivers/net/wireless/realtek/rtw88/coex.c +@@ -2194,7 +2194,6 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev) + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_efuse *efuse = &rtwdev->efuse; + u8 table_case, tdma_case; +- bool wl_cpt_test = false, bt_cpt_test = false; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + +@@ -2202,29 +2201,16 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev) + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + if (efuse->share_ant) { + /* Shared-Ant */ +- if (wl_cpt_test) { +- if (coex_stat->wl_gl_busy) { +- table_case = 20; +- tdma_case = 17; +- } else { +- table_case = 10; +- tdma_case = 15; +- } +- } else if (bt_cpt_test) { +- table_case = 26; +- tdma_case = 26; +- } else { +- if (coex_stat->wl_gl_busy && +- coex_stat->wl_noisy_level == 0) +- table_case = 14; +- else +- table_case = 10; ++ if (coex_stat->wl_gl_busy && ++ coex_stat->wl_noisy_level == 0) ++ table_case = 14; ++ else ++ table_case = 10; + +- if (coex_stat->wl_gl_busy) +- tdma_case = 15; +- else +- tdma_case = 20; +- } ++ if (coex_stat->wl_gl_busy) ++ tdma_case = 15; ++ else ++ tdma_case = 20; + } else { + /* Non-Shared-Ant */ + table_case = 112; 
+@@ -2235,11 +2221,7 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev) + tdma_case = 120; + } + +- if (wl_cpt_test) +- rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[1]); +- else +- rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); +- ++ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + rtw_coex_table(rtwdev, false, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); + } +diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c +index a1b674e3caaa3c..3596cf99c2ed44 100644 +--- a/drivers/net/wireless/realtek/rtw88/fw.c ++++ b/drivers/net/wireless/realtek/rtw88/fw.c +@@ -1388,10 +1388,12 @@ int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr, + val |= BIT_ENSWBCN >> 8; + rtw_write8(rtwdev, REG_CR + 1, val); + +- val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2); +- bckp[1] = val; +- val &= ~(BIT_EN_BCNQ_DL >> 16); +- rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val); ++ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE) { ++ val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2); ++ bckp[1] = val; ++ val &= ~(BIT_EN_BCNQ_DL >> 16); ++ rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val); ++ } + + ret = rtw_hci_write_data_rsvd_page(rtwdev, buf, size); + if (ret) { +@@ -1416,7 +1418,8 @@ int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr, + rsvd_pg_head = rtwdev->fifo.rsvd_boundary; + rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, + rsvd_pg_head | BIT_BCN_VALID_V1); +- rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]); ++ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE) ++ rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]); + rtw_write8(rtwdev, REG_CR + 1, bckp[0]); + + return ret; +diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c +index 63673005c2fb19..b90ea6c88b15d9 100644 +--- a/drivers/net/wireless/realtek/rtw88/main.c ++++ b/drivers/net/wireless/realtek/rtw88/main.c +@@ -1314,20 +1314,21 @@ static int rtw_wait_firmware_completion(struct rtw_dev *rtwdev) + { + const struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_fw_state *fw; ++ int ret = 0; + + fw = &rtwdev->fw; + wait_for_completion(&fw->completion); + if (!fw->firmware) +- return -EINVAL; ++ ret = -EINVAL; + + if (chip->wow_fw_name) { + fw = &rtwdev->wow_fw; + wait_for_completion(&fw->completion); + if (!fw->firmware) +- return -EINVAL; ++ ret = -EINVAL; + } + +- return 0; ++ return ret; + } + + static enum rtw_lps_deep_mode rtw_update_lps_deep_mode(struct rtw_dev *rtwdev, +diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c +index e2c7d9f876836a..a019f4085e7389 100644 +--- a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c ++++ b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c +@@ -31,8 +31,6 @@ static const struct usb_device_id rtw_8821cu_id_table[] = { + .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, + { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82b, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, +- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82c, 0xff, 0xff, 0xff), +- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x331d, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* D-Link */ + { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xc811, 0xff, 0xff, 0xff), +diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c +index cd965edc29cea3..3fe5c70ce731be 100644 +--- 
a/drivers/net/wireless/realtek/rtw88/rtw8822c.c ++++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c +@@ -2611,12 +2611,14 @@ static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status, + else + rxsc = GET_PHY_STAT_P1_HT_RXSC(phy_status); + +- if (rxsc >= 9 && rxsc <= 12) ++ if (rxsc == 0) ++ bw = rtwdev->hal.current_band_width; ++ else if (rxsc >= 1 && rxsc <= 8) ++ bw = RTW_CHANNEL_WIDTH_20; ++ else if (rxsc >= 9 && rxsc <= 12) + bw = RTW_CHANNEL_WIDTH_40; +- else if (rxsc >= 13) +- bw = RTW_CHANNEL_WIDTH_80; + else +- bw = RTW_CHANNEL_WIDTH_20; ++ bw = RTW_CHANNEL_WIDTH_80; + + channel = GET_PHY_STAT_P1_CHANNEL(phy_status); + rtw_set_rx_freq_band(pkt_stat, channel); +diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c +index 9ab836d0d4f12d..079b8cd7978573 100644 +--- a/drivers/ntb/hw/intel/ntb_hw_gen1.c ++++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c +@@ -778,7 +778,7 @@ static void ndev_init_debugfs(struct intel_ntb_dev *ndev) + ndev->debugfs_dir = + debugfs_create_dir(pci_name(ndev->ntb.pdev), + debugfs_dir); +- if (!ndev->debugfs_dir) ++ if (IS_ERR(ndev->debugfs_dir)) + ndev->debugfs_info = NULL; + else + ndev->debugfs_info = +diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c +index f9e7847a378e77..c84fadfc63c52c 100644 +--- a/drivers/ntb/ntb_transport.c ++++ b/drivers/ntb/ntb_transport.c +@@ -807,16 +807,29 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw) + } + + static int ntb_alloc_mw_buffer(struct ntb_transport_mw *mw, +- struct device *dma_dev, size_t align) ++ struct device *ntb_dev, size_t align) + { + dma_addr_t dma_addr; + void *alloc_addr, *virt_addr; + int rc; + +- alloc_addr = dma_alloc_coherent(dma_dev, mw->alloc_size, +- &dma_addr, GFP_KERNEL); ++ /* ++ * The buffer here is allocated against the NTB device. The reason to ++ * use dma_alloc_*() call is to allocate a large IOVA contiguous buffer ++ * backing the NTB BAR for the remote host to write to. During receive ++ * processing, the data is being copied out of the receive buffer to ++ * the kernel skbuff. When a DMA device is being used, dma_map_page() ++ * is called on the kvaddr of the receive buffer (from dma_alloc_*()) ++ * and remapped against the DMA device. It appears to be a double ++ * DMA mapping of buffers, but first is mapped to the NTB device and ++ * second is to the DMA device. DMA_ATTR_FORCE_CONTIGUOUS is necessary ++ * in order for the later dma_map_page() to not fail. 
++ */ ++ alloc_addr = dma_alloc_attrs(ntb_dev, mw->alloc_size, ++ &dma_addr, GFP_KERNEL, ++ DMA_ATTR_FORCE_CONTIGUOUS); + if (!alloc_addr) { +- dev_err(dma_dev, "Unable to alloc MW buff of size %zu\n", ++ dev_err(ntb_dev, "Unable to alloc MW buff of size %zu\n", + mw->alloc_size); + return -ENOMEM; + } +@@ -845,7 +858,7 @@ static int ntb_alloc_mw_buffer(struct ntb_transport_mw *mw, + return 0; + + err: +- dma_free_coherent(dma_dev, mw->alloc_size, alloc_addr, dma_addr); ++ dma_free_coherent(ntb_dev, mw->alloc_size, alloc_addr, dma_addr); + + return rc; + } +diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c +index 553f1f46bc664f..72bc1d017a46ee 100644 +--- a/drivers/ntb/test/ntb_perf.c ++++ b/drivers/ntb/test/ntb_perf.c +@@ -1227,7 +1227,7 @@ static ssize_t perf_dbgfs_read_info(struct file *filep, char __user *ubuf, + "\tOut buffer addr 0x%pK\n", peer->outbuf); + + pos += scnprintf(buf + pos, buf_size - pos, +- "\tOut buff phys addr %pa[p]\n", &peer->out_phys_addr); ++ "\tOut buff phys addr %pap\n", &peer->out_phys_addr); + + pos += scnprintf(buf + pos, buf_size - pos, + "\tOut buffer size %pa\n", &peer->outbuf_size); +diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c +index 07177eadc56e87..1ea8c27e8874d2 100644 +--- a/drivers/nvdimm/namespace_devs.c ++++ b/drivers/nvdimm/namespace_devs.c +@@ -1927,12 +1927,16 @@ static int cmp_dpa(const void *a, const void *b) + static struct device **scan_labels(struct nd_region *nd_region) + { + int i, count = 0; +- struct device *dev, **devs = NULL; ++ struct device *dev, **devs; + struct nd_label_ent *label_ent, *e; + struct nd_mapping *nd_mapping = &nd_region->mapping[0]; + struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); + resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1; + ++ devs = kcalloc(2, sizeof(dev), GFP_KERNEL); ++ if (!devs) ++ return NULL; ++ + /* "safe" because create_namespace_pmem() might list_move() label_ent */ + list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) { + struct nd_namespace_label *nd_label = label_ent->label; +@@ -1951,12 +1955,14 @@ static struct device **scan_labels(struct nd_region *nd_region) + goto err; + if (i < count) + continue; +- __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL); +- if (!__devs) +- goto err; +- memcpy(__devs, devs, sizeof(dev) * count); +- kfree(devs); +- devs = __devs; ++ if (count) { ++ __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL); ++ if (!__devs) ++ goto err; ++ memcpy(__devs, devs, sizeof(dev) * count); ++ kfree(devs); ++ devs = __devs; ++ } + + dev = create_namespace_pmem(nd_region, nd_mapping, nd_label); + if (IS_ERR(dev)) { +@@ -1983,11 +1989,6 @@ static struct device **scan_labels(struct nd_region *nd_region) + + /* Publish a zero-sized namespace for userspace to configure. 
*/ + nd_mapping_free_labels(nd_mapping); +- +- devs = kcalloc(2, sizeof(dev), GFP_KERNEL); +- if (!devs) +- goto err; +- + nspm = kzalloc(sizeof(*nspm), GFP_KERNEL); + if (!nspm) + goto err; +@@ -2026,11 +2027,10 @@ static struct device **scan_labels(struct nd_region *nd_region) + return devs; + + err: +- if (devs) { +- for (i = 0; devs[i]; i++) +- namespace_pmem_release(devs[i]); +- kfree(devs); +- } ++ for (i = 0; devs[i]; i++) ++ namespace_pmem_release(devs[i]); ++ kfree(devs); ++ + return NULL; + } + +diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c +index 645a6b13222057..37ea0fa421da8b 100644 +--- a/drivers/nvme/host/multipath.c ++++ b/drivers/nvme/host/multipath.c +@@ -585,7 +585,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns) + rc = device_add_disk(&head->subsys->dev, head->disk, + nvme_ns_id_attr_groups); + if (rc) { +- clear_bit(NVME_NSHEAD_DISK_LIVE, &ns->flags); ++ clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags); + return; + } + nvme_add_ns_head_cdev(head); +diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c +index b445ffe95e3f04..0e29a76ca53077 100644 +--- a/drivers/pci/controller/dwc/pci-dra7xx.c ++++ b/drivers/pci/controller/dwc/pci-dra7xx.c +@@ -841,7 +841,8 @@ static int dra7xx_pcie_probe(struct platform_device *pdev) + dra7xx->mode = mode; + + ret = devm_request_threaded_irq(dev, irq, NULL, dra7xx_pcie_irq_handler, +- IRQF_SHARED, "dra7xx-pcie-main", dra7xx); ++ IRQF_SHARED | IRQF_ONESHOT, ++ "dra7xx-pcie-main", dra7xx); + if (ret) { + dev_err(dev, "failed to request irq\n"); + goto err_gpio; +diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c +index 74703362aeec71..86b09b5d7f2493 100644 +--- a/drivers/pci/controller/dwc/pci-imx6.c ++++ b/drivers/pci/controller/dwc/pci-imx6.c +@@ -997,7 +997,7 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp) + ret = phy_power_on(imx6_pcie->phy); + if (ret) { + dev_err(dev, "waiting for PHY ready timeout!\n"); +- goto err_phy_off; ++ goto err_phy_exit; + } + } + +@@ -1012,8 +1012,9 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp) + return 0; + + err_phy_off: +- if (imx6_pcie->phy) +- phy_exit(imx6_pcie->phy); ++ phy_power_off(imx6_pcie->phy); ++err_phy_exit: ++ phy_exit(imx6_pcie->phy); + err_clk_disable: + imx6_pcie_clk_disable(imx6_pcie); + err_reg_disable: +diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c +index c1dedc83759c60..c5475830c835f5 100644 +--- a/drivers/pci/controller/dwc/pci-keystone.c ++++ b/drivers/pci/controller/dwc/pci-keystone.c +@@ -579,7 +579,7 @@ static void ks_pcie_quirk(struct pci_dev *dev) + */ + if (pci_match_id(am6_pci_devids, bridge)) { + bridge_dev = pci_get_host_bridge_device(dev); +- if (!bridge_dev && !bridge_dev->parent) ++ if (!bridge_dev || !bridge_dev->parent) + return; + + ks_pcie = dev_get_drvdata(bridge_dev->parent); +diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c +index 2ee146767971c3..421697ec7591d6 100644 +--- a/drivers/pci/controller/dwc/pcie-kirin.c ++++ b/drivers/pci/controller/dwc/pcie-kirin.c +@@ -415,12 +415,12 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie, + if (pcie->gpio_id_reset[i] < 0) + continue; + +- pcie->num_slots++; +- if (pcie->num_slots > MAX_PCI_SLOTS) { ++ if (pcie->num_slots + 1 >= MAX_PCI_SLOTS) { + dev_err(dev, "Too many PCI slots!\n"); + ret = -EINVAL; + goto put_node; + } ++ pcie->num_slots++; + + ret = of_pci_get_devfn(child); + if 
(ret < 0) { +diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c +index 176686bdb15c18..5b82098f32b7c7 100644 +--- a/drivers/pci/controller/pcie-xilinx-nwl.c ++++ b/drivers/pci/controller/pcie-xilinx-nwl.c +@@ -80,8 +80,8 @@ + #define MSGF_MISC_SR_NON_FATAL_DEV BIT(22) + #define MSGF_MISC_SR_FATAL_DEV BIT(23) + #define MSGF_MISC_SR_LINK_DOWN BIT(24) +-#define MSGF_MSIC_SR_LINK_AUTO_BWIDTH BIT(25) +-#define MSGF_MSIC_SR_LINK_BWIDTH BIT(26) ++#define MSGF_MISC_SR_LINK_AUTO_BWIDTH BIT(25) ++#define MSGF_MISC_SR_LINK_BWIDTH BIT(26) + + #define MSGF_MISC_SR_MASKALL (MSGF_MISC_SR_RXMSG_AVAIL | \ + MSGF_MISC_SR_RXMSG_OVER | \ +@@ -96,8 +96,8 @@ + MSGF_MISC_SR_NON_FATAL_DEV | \ + MSGF_MISC_SR_FATAL_DEV | \ + MSGF_MISC_SR_LINK_DOWN | \ +- MSGF_MSIC_SR_LINK_AUTO_BWIDTH | \ +- MSGF_MSIC_SR_LINK_BWIDTH) ++ MSGF_MISC_SR_LINK_AUTO_BWIDTH | \ ++ MSGF_MISC_SR_LINK_BWIDTH) + + /* Legacy interrupt status mask bits */ + #define MSGF_LEG_SR_INTA BIT(0) +@@ -301,10 +301,10 @@ static irqreturn_t nwl_pcie_misc_handler(int irq, void *data) + if (misc_stat & MSGF_MISC_SR_FATAL_DEV) + dev_err(dev, "Fatal Error Detected\n"); + +- if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH) ++ if (misc_stat & MSGF_MISC_SR_LINK_AUTO_BWIDTH) + dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n"); + +- if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH) ++ if (misc_stat & MSGF_MISC_SR_LINK_BWIDTH) + dev_info(dev, "Link Bandwidth Management Status bit set\n"); + + /* Clear misc interrupt status */ +@@ -373,7 +373,7 @@ static void nwl_mask_leg_irq(struct irq_data *data) + u32 mask; + u32 val; + +- mask = 1 << (data->hwirq - 1); ++ mask = 1 << data->hwirq; + raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags); + val = nwl_bridge_readl(pcie, MSGF_LEG_MASK); + nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK); +@@ -387,7 +387,7 @@ static void nwl_unmask_leg_irq(struct irq_data *data) + u32 mask; + u32 val; + +- mask = 1 << (data->hwirq - 1); ++ mask = 1 << data->hwirq; + raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags); + val = nwl_bridge_readl(pcie, MSGF_LEG_MASK); + nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK); +@@ -790,6 +790,7 @@ static int nwl_pcie_probe(struct platform_device *pdev) + return -ENODEV; + + pcie = pci_host_bridge_priv(bridge); ++ platform_set_drvdata(pdev, pcie); + + pcie->dev = dev; + pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT; +@@ -813,13 +814,13 @@ static int nwl_pcie_probe(struct platform_device *pdev) + err = nwl_pcie_bridge_init(pcie); + if (err) { + dev_err(dev, "HW Initialization failed\n"); +- return err; ++ goto err_clk; + } + + err = nwl_pcie_init_irq_domain(pcie); + if (err) { + dev_err(dev, "Failed creating IRQ Domain\n"); +- return err; ++ goto err_clk; + } + + bridge->sysdata = pcie; +@@ -829,11 +830,24 @@ static int nwl_pcie_probe(struct platform_device *pdev) + err = nwl_pcie_enable_msi(pcie); + if (err < 0) { + dev_err(dev, "failed to enable MSI support: %d\n", err); +- return err; ++ goto err_clk; + } + } + +- return pci_host_probe(bridge); ++ err = pci_host_probe(bridge); ++ if (!err) ++ return 0; ++ ++err_clk: ++ clk_disable_unprepare(pcie->clk); ++ return err; ++} ++ ++static void nwl_pcie_remove(struct platform_device *pdev) ++{ ++ struct nwl_pcie *pcie = platform_get_drvdata(pdev); ++ ++ clk_disable_unprepare(pcie->clk); + } + + static struct platform_driver nwl_pcie_driver = { +@@ -843,5 +857,6 @@ static struct platform_driver nwl_pcie_driver = { + .of_match_table = nwl_pcie_of_match, + }, + .probe = nwl_pcie_probe, ++ 
.remove_new = nwl_pcie_remove, + }; + builtin_platform_driver(nwl_pcie_driver); +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c +index 53e9e9788bd54d..93f2f4dcf6d696 100644 +--- a/drivers/pci/pci.c ++++ b/drivers/pci/pci.c +@@ -1208,7 +1208,7 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout) + if (delay > PCI_RESET_WAIT) { + if (retrain) { + retrain = false; +- if (pcie_failed_link_retrain(bridge)) { ++ if (pcie_failed_link_retrain(bridge) == 0) { + delay = 1; + continue; + } +@@ -5017,7 +5017,15 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt) + pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL); + } + +- return pcie_wait_for_link_status(pdev, use_lt, !use_lt); ++ rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt); ++ ++ /* ++ * Clear LBMS after a manual retrain so that the bit can be used ++ * to track link speed or width changes made by hardware itself ++ * in attempt to correct unreliable link operation. ++ */ ++ pcie_capability_write_word(pdev, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS); ++ return rc; + } + + /** +@@ -5875,8 +5883,10 @@ static void pci_bus_restore_locked(struct pci_bus *bus) + + list_for_each_entry(dev, &bus->devices, bus_list) { + pci_dev_restore(dev); +- if (dev->subordinate) ++ if (dev->subordinate) { ++ pci_bridge_wait_for_secondary_bus(dev, "bus reset"); + pci_bus_restore_locked(dev->subordinate); ++ } + } + } + +@@ -5910,8 +5920,10 @@ static void pci_slot_restore_locked(struct pci_slot *slot) + if (!dev->slot || dev->slot != slot) + continue; + pci_dev_restore(dev); +- if (dev->subordinate) ++ if (dev->subordinate) { ++ pci_bridge_wait_for_secondary_bus(dev, "slot reset"); + pci_bus_restore_locked(dev->subordinate); ++ } + } + } + +diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h +index 2cc032e8cbb926..d5e9010a135a14 100644 +--- a/drivers/pci/pci.h ++++ b/drivers/pci/pci.h +@@ -530,7 +530,7 @@ void pci_acs_init(struct pci_dev *dev); + int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags); + int pci_dev_specific_enable_acs(struct pci_dev *dev); + int pci_dev_specific_disable_acs_redir(struct pci_dev *dev); +-bool pcie_failed_link_retrain(struct pci_dev *dev); ++int pcie_failed_link_retrain(struct pci_dev *dev); + #else + static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev, + u16 acs_flags) +@@ -545,9 +545,9 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev) + { + return -ENOTTY; + } +-static inline bool pcie_failed_link_retrain(struct pci_dev *dev) ++static inline int pcie_failed_link_retrain(struct pci_dev *dev) + { +- return false; ++ return -ENOTTY; + } + #endif + +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c +index ec4277d7835b23..0b08ac45effbbc 100644 +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -66,7 +66,7 @@ + * apply this erratum workaround to any downstream ports as long as they + * support Link Active reporting and have the Link Control 2 register. + * Restrict the speed to 2.5GT/s then with the Target Link Speed field, +- * request a retrain and wait 200ms for the data link to go up. ++ * request a retrain and check the result. + * + * If this turns out successful and we know by the Vendor:Device ID it is + * safe to do so, then lift the restriction, letting the devices negotiate +@@ -74,33 +74,45 @@ + * firmware may have already arranged and lift it with ports that already + * report their data link being up. + * +- * Return TRUE if the link has been successfully retrained, otherwise FALSE. 
++ * Otherwise revert the speed to the original setting and request a retrain ++ * again to remove any residual state, ignoring the result as it's supposed ++ * to fail anyway. ++ * ++ * Return 0 if the link has been successfully retrained. Return an error ++ * if retraining was not needed or we attempted a retrain and it failed. + */ +-bool pcie_failed_link_retrain(struct pci_dev *dev) ++int pcie_failed_link_retrain(struct pci_dev *dev) + { + static const struct pci_device_id ids[] = { + { PCI_VDEVICE(ASMEDIA, 0x2824) }, /* ASMedia ASM2824 */ + {} + }; + u16 lnksta, lnkctl2; ++ int ret = -ENOTTY; + + if (!pci_is_pcie(dev) || !pcie_downstream_port(dev) || + !pcie_cap_has_lnkctl2(dev) || !dev->link_active_reporting) +- return false; ++ return ret; + + pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &lnkctl2); + pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); + if ((lnksta & (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_DLLLA)) == + PCI_EXP_LNKSTA_LBMS) { ++ u16 oldlnkctl2 = lnkctl2; ++ + pci_info(dev, "broken device, retraining non-functional downstream link at 2.5GT/s\n"); + + lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS; + lnkctl2 |= PCI_EXP_LNKCTL2_TLS_2_5GT; + pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2); + +- if (pcie_retrain_link(dev, false)) { ++ ret = pcie_retrain_link(dev, false); ++ if (ret) { + pci_info(dev, "retraining failed\n"); +- return false; ++ pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, ++ oldlnkctl2); ++ pcie_retrain_link(dev, true); ++ return ret; + } + + pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); +@@ -117,13 +129,14 @@ bool pcie_failed_link_retrain(struct pci_dev *dev) + lnkctl2 |= lnkcap & PCI_EXP_LNKCAP_SLS; + pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2); + +- if (pcie_retrain_link(dev, false)) { ++ ret = pcie_retrain_link(dev, false); ++ if (ret) { + pci_info(dev, "retraining failed\n"); +- return false; ++ return ret; + } + } + +- return true; ++ return ret; + } + + static ktime_t fixup_debug_start(struct pci_dev *dev, +diff --git a/drivers/perf/alibaba_uncore_drw_pmu.c b/drivers/perf/alibaba_uncore_drw_pmu.c +index 19d459a36be55c..818ce4424d34dc 100644 +--- a/drivers/perf/alibaba_uncore_drw_pmu.c ++++ b/drivers/perf/alibaba_uncore_drw_pmu.c +@@ -408,7 +408,7 @@ static irqreturn_t ali_drw_pmu_isr(int irq_num, void *data) + } + + /* clear common counter intr status */ +- clr_status = FIELD_PREP(ALI_DRW_PMCOM_CNT_OV_INTR_MASK, 1); ++ clr_status = FIELD_PREP(ALI_DRW_PMCOM_CNT_OV_INTR_MASK, status); + writel(clr_status, + drw_pmu->cfg_base + ALI_DRW_PMU_OV_INTR_CLR); + } +diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c +index 2c684e49a6fc73..0b3ce77136456a 100644 +--- a/drivers/perf/arm-cmn.c ++++ b/drivers/perf/arm-cmn.c +@@ -24,14 +24,6 @@ + #define CMN_NI_NODE_ID GENMASK_ULL(31, 16) + #define CMN_NI_LOGICAL_ID GENMASK_ULL(47, 32) + +-#define CMN_NODEID_DEVID(reg) ((reg) & 3) +-#define CMN_NODEID_EXT_DEVID(reg) ((reg) & 1) +-#define CMN_NODEID_PID(reg) (((reg) >> 2) & 1) +-#define CMN_NODEID_EXT_PID(reg) (((reg) >> 1) & 3) +-#define CMN_NODEID_1x1_PID(reg) (((reg) >> 2) & 7) +-#define CMN_NODEID_X(reg, bits) ((reg) >> (3 + (bits))) +-#define CMN_NODEID_Y(reg, bits) (((reg) >> 3) & ((1U << (bits)) - 1)) +- + #define CMN_CHILD_INFO 0x0080 + #define CMN_CI_CHILD_COUNT GENMASK_ULL(15, 0) + #define CMN_CI_CHILD_PTR_OFFSET GENMASK_ULL(31, 16) +@@ -43,6 +35,9 @@ + #define CMN_MAX_XPS (CMN_MAX_DIMENSION * CMN_MAX_DIMENSION) + #define CMN_MAX_DTMS (CMN_MAX_XPS + (CMN_MAX_DIMENSION - 1) * 4) + ++/* Currently XPs are the node type 
we can have most of; others top out at 128 */ ++#define CMN_MAX_NODES_PER_EVENT CMN_MAX_XPS ++ + /* The CFG node has various info besides the discovery tree */ + #define CMN_CFGM_PERIPH_ID_01 0x0008 + #define CMN_CFGM_PID0_PART_0 GENMASK_ULL(7, 0) +@@ -78,7 +73,8 @@ + /* Technically this is 4 bits wide on DNs, but we only use 2 there anyway */ + #define CMN__PMU_OCCUP1_ID GENMASK_ULL(34, 32) + +-/* HN-Ps are weird... */ ++/* Some types are designed to coexist with another device in the same node */ ++#define CMN_CCLA_PMU_EVENT_SEL 0x008 + #define CMN_HNP_PMU_EVENT_SEL 0x008 + + /* DTMs live in the PMU space of XP registers */ +@@ -281,16 +277,16 @@ struct arm_cmn_node { + u16 id, logid; + enum cmn_node_type type; + +- int dtm; +- union { +- /* DN/HN-F/CXHA */ +- struct { +- u8 val : 4; +- u8 count : 4; +- } occupid[SEL_MAX]; +- /* XP */ +- u8 dtc; +- }; ++ /* XP properties really, but replicated to children for convenience */ ++ u8 dtm; ++ s8 dtc; ++ u8 portid_bits:4; ++ u8 deviceid_bits:4; ++ /* DN/HN-F/CXHA */ ++ struct { ++ u8 val : 4; ++ u8 count : 4; ++ } occupid[SEL_MAX]; + union { + u8 event[4]; + __le32 event_sel; +@@ -361,49 +357,33 @@ struct arm_cmn { + static int arm_cmn_hp_state; + + struct arm_cmn_nodeid { +- u8 x; +- u8 y; + u8 port; + u8 dev; + }; + + static int arm_cmn_xyidbits(const struct arm_cmn *cmn) + { +- return fls((cmn->mesh_x - 1) | (cmn->mesh_y - 1) | 2); ++ return fls((cmn->mesh_x - 1) | (cmn->mesh_y - 1)); + } + +-static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn *cmn, u16 id) ++static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn_node *dn) + { + struct arm_cmn_nodeid nid; + +- if (cmn->num_xps == 1) { +- nid.x = 0; +- nid.y = 0; +- nid.port = CMN_NODEID_1x1_PID(id); +- nid.dev = CMN_NODEID_DEVID(id); +- } else { +- int bits = arm_cmn_xyidbits(cmn); +- +- nid.x = CMN_NODEID_X(id, bits); +- nid.y = CMN_NODEID_Y(id, bits); +- if (cmn->ports_used & 0xc) { +- nid.port = CMN_NODEID_EXT_PID(id); +- nid.dev = CMN_NODEID_EXT_DEVID(id); +- } else { +- nid.port = CMN_NODEID_PID(id); +- nid.dev = CMN_NODEID_DEVID(id); +- } +- } ++ nid.dev = dn->id & ((1U << dn->deviceid_bits) - 1); ++ nid.port = (dn->id >> dn->deviceid_bits) & ((1U << dn->portid_bits) - 1); + return nid; + } + + static struct arm_cmn_node *arm_cmn_node_to_xp(const struct arm_cmn *cmn, + const struct arm_cmn_node *dn) + { +- struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id); +- int xp_idx = cmn->mesh_x * nid.y + nid.x; ++ int id = dn->id >> (dn->portid_bits + dn->deviceid_bits); ++ int bits = arm_cmn_xyidbits(cmn); ++ int x = id >> bits; ++ int y = id & ((1U << bits) - 1); + +- return cmn->xps + xp_idx; ++ return cmn->xps + cmn->mesh_x * y + x; + } + static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn, + enum cmn_node_type type) +@@ -489,13 +469,14 @@ static const char *arm_cmn_device_type(u8 type) + } + } + +-static void arm_cmn_show_logid(struct seq_file *s, int x, int y, int p, int d) ++static void arm_cmn_show_logid(struct seq_file *s, const struct arm_cmn_node *xp, int p, int d) + { + struct arm_cmn *cmn = s->private; + struct arm_cmn_node *dn; ++ u16 id = xp->id | d | (p << xp->deviceid_bits); + + for (dn = cmn->dns; dn->type; dn++) { +- struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id); ++ int pad = dn->logid < 10; + + if (dn->type == CMN_TYPE_XP) + continue; +@@ -503,10 +484,10 @@ static void arm_cmn_show_logid(struct seq_file *s, int x, int y, int p, int d) + if (dn->type < CMN_TYPE_HNI) + continue; + +- if (nid.x != x || nid.y != y || nid.port != p || 
nid.dev != d) ++ if (dn->id != id) + continue; + +- seq_printf(s, " #%-2d |", dn->logid); ++ seq_printf(s, " %*c#%-*d |", pad + 1, ' ', 3 - pad, dn->logid); + return; + } + seq_puts(s, " |"); +@@ -519,33 +500,32 @@ static int arm_cmn_map_show(struct seq_file *s, void *data) + + seq_puts(s, " X"); + for (x = 0; x < cmn->mesh_x; x++) +- seq_printf(s, " %d ", x); ++ seq_printf(s, " %-2d ", x); + seq_puts(s, "\nY P D+"); + y = cmn->mesh_y; + while (y--) { + int xp_base = cmn->mesh_x * y; ++ struct arm_cmn_node *xp = cmn->xps + xp_base; + u8 port[CMN_MAX_PORTS][CMN_MAX_DIMENSION]; + + for (x = 0; x < cmn->mesh_x; x++) + seq_puts(s, "--------+"); + +- seq_printf(s, "\n%d |", y); ++ seq_printf(s, "\n%-2d |", y); + for (x = 0; x < cmn->mesh_x; x++) { +- struct arm_cmn_node *xp = cmn->xps + xp_base + x; +- + for (p = 0; p < CMN_MAX_PORTS; p++) +- port[p][x] = arm_cmn_device_connect_info(cmn, xp, p); +- seq_printf(s, " XP #%-2d |", xp_base + x); ++ port[p][x] = arm_cmn_device_connect_info(cmn, xp + x, p); ++ seq_printf(s, " XP #%-3d|", xp_base + x); + } + + seq_puts(s, "\n |"); + for (x = 0; x < cmn->mesh_x; x++) { +- u8 dtc = cmn->xps[xp_base + x].dtc; ++ s8 dtc = xp[x].dtc; + +- if (dtc & (dtc - 1)) ++ if (dtc < 0) + seq_puts(s, " DTC ?? |"); + else +- seq_printf(s, " DTC %ld |", __ffs(dtc)); ++ seq_printf(s, " DTC %d |", dtc); + } + seq_puts(s, "\n |"); + for (x = 0; x < cmn->mesh_x; x++) +@@ -557,10 +537,10 @@ static int arm_cmn_map_show(struct seq_file *s, void *data) + seq_puts(s, arm_cmn_device_type(port[p][x])); + seq_puts(s, "\n 0|"); + for (x = 0; x < cmn->mesh_x; x++) +- arm_cmn_show_logid(s, x, y, p, 0); ++ arm_cmn_show_logid(s, xp + x, p, 0); + seq_puts(s, "\n 1|"); + for (x = 0; x < cmn->mesh_x; x++) +- arm_cmn_show_logid(s, x, y, p, 1); ++ arm_cmn_show_logid(s, xp + x, p, 1); + } + seq_puts(s, "\n-----+"); + } +@@ -588,9 +568,8 @@ static void arm_cmn_debugfs_init(struct arm_cmn *cmn, int id) {} + + struct arm_cmn_hw_event { + struct arm_cmn_node *dn; +- u64 dtm_idx[4]; +- unsigned int dtc_idx; +- u8 dtcs_used; ++ u64 dtm_idx[DIV_ROUND_UP(CMN_MAX_NODES_PER_EVENT * 2, 64)]; ++ s8 dtc_idx[CMN_MAX_DTCS]; + u8 num_dns; + u8 dtm_offset; + bool wide_sel; +@@ -600,6 +579,10 @@ struct arm_cmn_hw_event { + #define for_each_hw_dn(hw, dn, i) \ + for (i = 0, dn = hw->dn; i < hw->num_dns; i++, dn++) + ++/* @i is the DTC number, @idx is the counter index on that DTC */ ++#define for_each_hw_dtc_idx(hw, i, idx) \ ++ for (int i = 0, idx; i < CMN_MAX_DTCS; i++) if ((idx = hw->dtc_idx[i]) >= 0) ++ + static struct arm_cmn_hw_event *to_cmn_hw(struct perf_event *event) + { + BUILD_BUG_ON(sizeof(struct arm_cmn_hw_event) > offsetof(struct hw_perf_event, target)); +@@ -1429,12 +1412,11 @@ static void arm_cmn_init_counter(struct perf_event *event) + { + struct arm_cmn *cmn = to_cmn(event->pmu); + struct arm_cmn_hw_event *hw = to_cmn_hw(event); +- unsigned int i, pmevcnt = CMN_DT_PMEVCNT(hw->dtc_idx); + u64 count; + +- for (i = 0; hw->dtcs_used & (1U << i); i++) { +- writel_relaxed(CMN_COUNTER_INIT, cmn->dtc[i].base + pmevcnt); +- cmn->dtc[i].counters[hw->dtc_idx] = event; ++ for_each_hw_dtc_idx(hw, i, idx) { ++ writel_relaxed(CMN_COUNTER_INIT, cmn->dtc[i].base + CMN_DT_PMEVCNT(idx)); ++ cmn->dtc[i].counters[idx] = event; + } + + count = arm_cmn_read_dtm(cmn, hw, false); +@@ -1447,11 +1429,9 @@ static void arm_cmn_event_read(struct perf_event *event) + struct arm_cmn_hw_event *hw = to_cmn_hw(event); + u64 delta, new, prev; + unsigned long flags; +- unsigned int i; + +- if (hw->dtc_idx == CMN_DT_NUM_COUNTERS) { 
+- i = __ffs(hw->dtcs_used); +- delta = arm_cmn_read_cc(cmn->dtc + i); ++ if (CMN_EVENT_TYPE(event) == CMN_TYPE_DTC) { ++ delta = arm_cmn_read_cc(cmn->dtc + hw->dtc_idx[0]); + local64_add(delta, &event->count); + return; + } +@@ -1461,8 +1441,8 @@ static void arm_cmn_event_read(struct perf_event *event) + delta = new - prev; + + local_irq_save(flags); +- for (i = 0; hw->dtcs_used & (1U << i); i++) { +- new = arm_cmn_read_counter(cmn->dtc + i, hw->dtc_idx); ++ for_each_hw_dtc_idx(hw, i, idx) { ++ new = arm_cmn_read_counter(cmn->dtc + i, idx); + delta += new << 16; + } + local_irq_restore(flags); +@@ -1518,7 +1498,7 @@ static void arm_cmn_event_start(struct perf_event *event, int flags) + int i; + + if (type == CMN_TYPE_DTC) { +- i = __ffs(hw->dtcs_used); ++ i = hw->dtc_idx[0]; + writeq_relaxed(CMN_CC_INIT, cmn->dtc[i].base + CMN_DT_PMCCNTR); + cmn->dtc[i].cc_active = true; + } else if (type == CMN_TYPE_WP) { +@@ -1549,7 +1529,7 @@ static void arm_cmn_event_stop(struct perf_event *event, int flags) + int i; + + if (type == CMN_TYPE_DTC) { +- i = __ffs(hw->dtcs_used); ++ i = hw->dtc_idx[0]; + cmn->dtc[i].cc_active = false; + } else if (type == CMN_TYPE_WP) { + int wp_idx = arm_cmn_wp_idx(event); +@@ -1735,29 +1715,27 @@ static int arm_cmn_event_init(struct perf_event *event) + hw->dn = arm_cmn_node(cmn, type); + if (!hw->dn) + return -EINVAL; ++ ++ memset(hw->dtc_idx, -1, sizeof(hw->dtc_idx)); + for (dn = hw->dn; dn->type == type; dn++) { + if (bynodeid && dn->id != nodeid) { + hw->dn++; + continue; + } + hw->num_dns++; ++ if (dn->dtc < 0) ++ memset(hw->dtc_idx, 0, cmn->num_dtcs); ++ else ++ hw->dtc_idx[dn->dtc] = 0; ++ + if (bynodeid) + break; + } + + if (!hw->num_dns) { +- struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, nodeid); +- +- dev_dbg(cmn->dev, "invalid node 0x%x (%d,%d,%d,%d) type 0x%x\n", +- nodeid, nid.x, nid.y, nid.port, nid.dev, type); ++ dev_dbg(cmn->dev, "invalid node 0x%x type 0x%x\n", nodeid, type); + return -EINVAL; + } +- /* +- * Keep assuming non-cycles events count in all DTC domains; turns out +- * it's hard to make a worthwhile optimisation around this, short of +- * going all-in with domain-local counter allocation as well. +- */ +- hw->dtcs_used = (1U << cmn->num_dtcs) - 1; + + return arm_cmn_validate_group(cmn, event); + } +@@ -1783,28 +1761,25 @@ static void arm_cmn_event_clear(struct arm_cmn *cmn, struct perf_event *event, + } + memset(hw->dtm_idx, 0, sizeof(hw->dtm_idx)); + +- for (i = 0; hw->dtcs_used & (1U << i); i++) +- cmn->dtc[i].counters[hw->dtc_idx] = NULL; ++ for_each_hw_dtc_idx(hw, j, idx) ++ cmn->dtc[j].counters[idx] = NULL; + } + + static int arm_cmn_event_add(struct perf_event *event, int flags) + { + struct arm_cmn *cmn = to_cmn(event->pmu); + struct arm_cmn_hw_event *hw = to_cmn_hw(event); +- struct arm_cmn_dtc *dtc = &cmn->dtc[0]; + struct arm_cmn_node *dn; + enum cmn_node_type type = CMN_EVENT_TYPE(event); +- unsigned int i, dtc_idx, input_sel; ++ unsigned int input_sel, i = 0; + + if (type == CMN_TYPE_DTC) { +- i = 0; + while (cmn->dtc[i].cycles) + if (++i == cmn->num_dtcs) + return -ENOSPC; + + cmn->dtc[i].cycles = event; +- hw->dtc_idx = CMN_DT_NUM_COUNTERS; +- hw->dtcs_used = 1U << i; ++ hw->dtc_idx[0] = i; + + if (flags & PERF_EF_START) + arm_cmn_event_start(event, 0); +@@ -1812,17 +1787,22 @@ static int arm_cmn_event_add(struct perf_event *event, int flags) + } + + /* Grab a free global counter first... 
*/ +- dtc_idx = 0; +- while (dtc->counters[dtc_idx]) +- if (++dtc_idx == CMN_DT_NUM_COUNTERS) +- return -ENOSPC; +- +- hw->dtc_idx = dtc_idx; ++ for_each_hw_dtc_idx(hw, j, idx) { ++ if (j > 0) { ++ idx = hw->dtc_idx[0]; ++ } else { ++ idx = 0; ++ while (cmn->dtc[j].counters[idx]) ++ if (++idx == CMN_DT_NUM_COUNTERS) ++ return -ENOSPC; ++ } ++ hw->dtc_idx[j] = idx; ++ } + + /* ...then the local counters to feed it. */ + for_each_hw_dn(hw, dn, i) { + struct arm_cmn_dtm *dtm = &cmn->dtms[dn->dtm] + hw->dtm_offset; +- unsigned int dtm_idx, shift; ++ unsigned int dtm_idx, shift, d = 0; + u64 reg; + + dtm_idx = 0; +@@ -1841,14 +1821,14 @@ static int arm_cmn_event_add(struct perf_event *event, int flags) + + tmp = dtm->wp_event[wp_idx ^ 1]; + if (tmp >= 0 && CMN_EVENT_WP_COMBINE(event) != +- CMN_EVENT_WP_COMBINE(dtc->counters[tmp])) ++ CMN_EVENT_WP_COMBINE(cmn->dtc[d].counters[tmp])) + goto free_dtms; + + input_sel = CMN__PMEVCNT0_INPUT_SEL_WP + wp_idx; +- dtm->wp_event[wp_idx] = dtc_idx; ++ dtm->wp_event[wp_idx] = hw->dtc_idx[d]; + writel_relaxed(cfg, dtm->base + CMN_DTM_WPn_CONFIG(wp_idx)); + } else { +- struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id); ++ struct arm_cmn_nodeid nid = arm_cmn_nid(dn); + + if (cmn->multi_dtm) + nid.port %= 2; +@@ -1865,7 +1845,7 @@ static int arm_cmn_event_add(struct perf_event *event, int flags) + dtm->input_sel[dtm_idx] = input_sel; + shift = CMN__PMEVCNTn_GLOBAL_NUM_SHIFT(dtm_idx); + dtm->pmu_config_low &= ~(CMN__PMEVCNT0_GLOBAL_NUM << shift); +- dtm->pmu_config_low |= FIELD_PREP(CMN__PMEVCNT0_GLOBAL_NUM, dtc_idx) << shift; ++ dtm->pmu_config_low |= FIELD_PREP(CMN__PMEVCNT0_GLOBAL_NUM, hw->dtc_idx[d]) << shift; + dtm->pmu_config_low |= CMN__PMEVCNT_PAIRED(dtm_idx); + reg = (u64)le32_to_cpu(dtm->pmu_config_high) << 32 | dtm->pmu_config_low; + writeq_relaxed(reg, dtm->base + CMN_DTM_PMU_CONFIG); +@@ -1893,7 +1873,7 @@ static void arm_cmn_event_del(struct perf_event *event, int flags) + arm_cmn_event_stop(event, PERF_EF_UPDATE); + + if (type == CMN_TYPE_DTC) +- cmn->dtc[__ffs(hw->dtcs_used)].cycles = NULL; ++ cmn->dtc[hw->dtc_idx[0]].cycles = NULL; + else + arm_cmn_event_clear(cmn, event, hw->num_dns); + } +@@ -2074,7 +2054,6 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn) + { + struct arm_cmn_node *dn, *xp; + int dtc_idx = 0; +- u8 dtcs_present = (1 << cmn->num_dtcs) - 1; + + cmn->dtc = devm_kcalloc(cmn->dev, cmn->num_dtcs, sizeof(cmn->dtc[0]), GFP_KERNEL); + if (!cmn->dtc) +@@ -2084,23 +2063,28 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn) + + cmn->xps = arm_cmn_node(cmn, CMN_TYPE_XP); + ++ if (cmn->part == PART_CMN600 && cmn->num_dtcs > 1) { ++ /* We do at least know that a DTC's XP must be in that DTC's domain */ ++ dn = arm_cmn_node(cmn, CMN_TYPE_DTC); ++ for (int i = 0; i < cmn->num_dtcs; i++) ++ arm_cmn_node_to_xp(cmn, dn + i)->dtc = i; ++ } ++ + for (dn = cmn->dns; dn->type; dn++) { +- if (dn->type == CMN_TYPE_XP) { +- dn->dtc &= dtcs_present; ++ if (dn->type == CMN_TYPE_XP) + continue; +- } + + xp = arm_cmn_node_to_xp(cmn, dn); ++ dn->portid_bits = xp->portid_bits; ++ dn->deviceid_bits = xp->deviceid_bits; ++ dn->dtc = xp->dtc; + dn->dtm = xp->dtm; + if (cmn->multi_dtm) +- dn->dtm += arm_cmn_nid(cmn, dn->id).port / 2; ++ dn->dtm += arm_cmn_nid(dn).port / 2; + + if (dn->type == CMN_TYPE_DTC) { +- int err; +- /* We do at least know that a DTC's XP must be in that DTC's domain */ +- if (xp->dtc == 0xf) +- xp->dtc = 1 << dtc_idx; +- err = arm_cmn_init_dtc(cmn, dn, dtc_idx++); ++ int err = arm_cmn_init_dtc(cmn, dn, dtc_idx++); ++ + if (err) + 
return err; + } +@@ -2258,26 +2242,35 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) + cmn->mesh_x = xp->logid; + + if (cmn->part == PART_CMN600) +- xp->dtc = 0xf; ++ xp->dtc = -1; + else +- xp->dtc = 1 << arm_cmn_dtc_domain(cmn, xp_region); ++ xp->dtc = arm_cmn_dtc_domain(cmn, xp_region); + + xp->dtm = dtm - cmn->dtms; + arm_cmn_init_dtm(dtm++, xp, 0); + /* + * Keeping track of connected ports will let us filter out +- * unnecessary XP events easily. We can also reliably infer the +- * "extra device ports" configuration for the node ID format +- * from this, since in that case we will see at least one XP +- * with port 2 connected, for the HN-D. ++ * unnecessary XP events easily, and also infer the per-XP ++ * part of the node ID format. + */ + for (int p = 0; p < CMN_MAX_PORTS; p++) + if (arm_cmn_device_connect_info(cmn, xp, p)) + xp_ports |= BIT(p); + +- if (cmn->multi_dtm && (xp_ports & 0xc)) ++ if (cmn->num_xps == 1) { ++ xp->portid_bits = 3; ++ xp->deviceid_bits = 2; ++ } else if (xp_ports > 0x3) { ++ xp->portid_bits = 2; ++ xp->deviceid_bits = 1; ++ } else { ++ xp->portid_bits = 1; ++ xp->deviceid_bits = 2; ++ } ++ ++ if (cmn->multi_dtm && (xp_ports > 0x3)) + arm_cmn_init_dtm(dtm++, xp, 1); +- if (cmn->multi_dtm && (xp_ports & 0x30)) ++ if (cmn->multi_dtm && (xp_ports > 0xf)) + arm_cmn_init_dtm(dtm++, xp, 2); + + cmn->ports_used |= xp_ports; +@@ -2332,10 +2325,13 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) + case CMN_TYPE_CXHA: + case CMN_TYPE_CCRA: + case CMN_TYPE_CCHA: +- case CMN_TYPE_CCLA: + case CMN_TYPE_HNS: + dn++; + break; ++ case CMN_TYPE_CCLA: ++ dn->pmu_base += CMN_CCLA_PMU_EVENT_SEL; ++ dn++; ++ break; + /* Nothing to see here */ + case CMN_TYPE_MPAM_S: + case CMN_TYPE_MPAM_NS: +@@ -2353,7 +2349,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) + case CMN_TYPE_HNP: + case CMN_TYPE_CCLA_RNI: + dn[1] = dn[0]; +- dn[0].pmu_base += CMN_HNP_PMU_EVENT_SEL; ++ dn[0].pmu_base += CMN_CCLA_PMU_EVENT_SEL; + dn[1].type = arm_cmn_subtype(dn->type); + dn += 2; + break; +diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c +index 430ca15373fe2c..4a902da5c1d495 100644 +--- a/drivers/perf/hisilicon/hisi_pcie_pmu.c ++++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c +@@ -221,7 +221,7 @@ static void hisi_pcie_pmu_config_filter(struct perf_event *event) + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + u64 port, trig_len, thr_len, len_mode; +- u64 reg = HISI_PCIE_INIT_SET; ++ u64 reg = 0; + + /* Config HISI_PCIE_EVENT_CTRL according to event. */ + reg |= FIELD_PREP(HISI_PCIE_EVENT_M, hisi_pcie_get_real_event(event)); +@@ -458,10 +458,24 @@ static void hisi_pcie_pmu_set_period(struct perf_event *event) + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; ++ u64 orig_cnt, cnt; ++ ++ orig_cnt = hisi_pcie_pmu_read_counter(event); + + local64_set(&hwc->prev_count, HISI_PCIE_INIT_VAL); + hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_CNT, idx, HISI_PCIE_INIT_VAL); + hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EXT_CNT, idx, HISI_PCIE_INIT_VAL); ++ ++ /* ++ * The counter maybe unwritable if the target event is unsupported. ++ * Check this by comparing the counts after setting the period. If ++ * the counts stay unchanged after setting the period then update ++ * the hwc->prev_count correctly. Otherwise the final counts user ++ * get maybe totally wrong. 
++ */ ++ cnt = hisi_pcie_pmu_read_counter(event); ++ if (orig_cnt == cnt) ++ local64_set(&hwc->prev_count, cnt); + } + + static void hisi_pcie_pmu_enable_counter(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc) +diff --git a/drivers/pinctrl/bcm/pinctrl-ns.c b/drivers/pinctrl/bcm/pinctrl-ns.c +index f80630a74d34a5..d099a7f25f64c9 100644 +--- a/drivers/pinctrl/bcm/pinctrl-ns.c ++++ b/drivers/pinctrl/bcm/pinctrl-ns.c +@@ -7,11 +7,11 @@ + #include + #include + #include +-#include + #include + #include + #include + #include ++#include + #include + + #include "../core.h" +@@ -208,7 +208,6 @@ static const struct of_device_id ns_pinctrl_of_match_table[] = { + static int ns_pinctrl_probe(struct platform_device *pdev) + { + struct device *dev = &pdev->dev; +- const struct of_device_id *of_id; + struct ns_pinctrl *ns_pinctrl; + struct pinctrl_desc *pctldesc; + struct pinctrl_pin_desc *pin; +@@ -225,10 +224,7 @@ static int ns_pinctrl_probe(struct platform_device *pdev) + + ns_pinctrl->dev = dev; + +- of_id = of_match_device(ns_pinctrl_of_match_table, dev); +- if (!of_id) +- return -EINVAL; +- ns_pinctrl->chipset_flag = (uintptr_t)of_id->data; ++ ns_pinctrl->chipset_flag = (uintptr_t)device_get_match_data(dev); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "cru_gpio_control"); +diff --git a/drivers/pinctrl/berlin/berlin-bg2.c b/drivers/pinctrl/berlin/berlin-bg2.c +index acbd413340e8be..15aed44676271a 100644 +--- a/drivers/pinctrl/berlin/berlin-bg2.c ++++ b/drivers/pinctrl/berlin/berlin-bg2.c +@@ -8,8 +8,9 @@ + */ + + #include +-#include ++#include + #include ++#include + #include + + #include "berlin.h" +@@ -227,10 +228,7 @@ static const struct of_device_id berlin2_pinctrl_match[] = { + + static int berlin2_pinctrl_probe(struct platform_device *pdev) + { +- const struct of_device_id *match = +- of_match_device(berlin2_pinctrl_match, &pdev->dev); +- +- return berlin_pinctrl_probe(pdev, match->data); ++ return berlin_pinctrl_probe(pdev, device_get_match_data(&pdev->dev)); + } + + static struct platform_driver berlin2_pinctrl_driver = { +diff --git a/drivers/pinctrl/berlin/berlin-bg2cd.c b/drivers/pinctrl/berlin/berlin-bg2cd.c +index c0f5d86d5d01d9..73a1d8c2308866 100644 +--- a/drivers/pinctrl/berlin/berlin-bg2cd.c ++++ b/drivers/pinctrl/berlin/berlin-bg2cd.c +@@ -8,8 +8,9 @@ + */ + + #include +-#include ++#include + #include ++#include + #include + + #include "berlin.h" +@@ -172,10 +173,7 @@ static const struct of_device_id berlin2cd_pinctrl_match[] = { + + static int berlin2cd_pinctrl_probe(struct platform_device *pdev) + { +- const struct of_device_id *match = +- of_match_device(berlin2cd_pinctrl_match, &pdev->dev); +- +- return berlin_pinctrl_probe(pdev, match->data); ++ return berlin_pinctrl_probe(pdev, device_get_match_data(&pdev->dev)); + } + + static struct platform_driver berlin2cd_pinctrl_driver = { +diff --git a/drivers/pinctrl/berlin/berlin-bg2q.c b/drivers/pinctrl/berlin/berlin-bg2q.c +index 20a3216ede07a7..a5dbc8f279e70a 100644 +--- a/drivers/pinctrl/berlin/berlin-bg2q.c ++++ b/drivers/pinctrl/berlin/berlin-bg2q.c +@@ -8,8 +8,9 @@ + */ + + #include +-#include ++#include + #include ++#include + #include + + #include "berlin.h" +@@ -389,10 +390,7 @@ static const struct of_device_id berlin2q_pinctrl_match[] = { + + static int berlin2q_pinctrl_probe(struct platform_device *pdev) + { +- const struct of_device_id *match = +- of_match_device(berlin2q_pinctrl_match, &pdev->dev); +- +- return berlin_pinctrl_probe(pdev, match->data); ++ return berlin_pinctrl_probe(pdev, 
device_get_match_data(&pdev->dev)); + } + + static struct platform_driver berlin2q_pinctrl_driver = { +diff --git a/drivers/pinctrl/berlin/berlin-bg4ct.c b/drivers/pinctrl/berlin/berlin-bg4ct.c +index 3026a3b3da2dd9..9bf0a54f2798a2 100644 +--- a/drivers/pinctrl/berlin/berlin-bg4ct.c ++++ b/drivers/pinctrl/berlin/berlin-bg4ct.c +@@ -8,8 +8,9 @@ + */ + + #include +-#include ++#include + #include ++#include + #include + + #include "berlin.h" +@@ -449,8 +450,8 @@ static const struct of_device_id berlin4ct_pinctrl_match[] = { + + static int berlin4ct_pinctrl_probe(struct platform_device *pdev) + { +- const struct of_device_id *match = +- of_match_device(berlin4ct_pinctrl_match, &pdev->dev); ++ const struct berlin_pinctrl_desc *desc = ++ device_get_match_data(&pdev->dev); + struct regmap_config *rmconfig; + struct regmap *regmap; + struct resource *res; +@@ -473,7 +474,7 @@ static int berlin4ct_pinctrl_probe(struct platform_device *pdev) + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + +- return berlin_pinctrl_probe_regmap(pdev, match->data, regmap); ++ return berlin_pinctrl_probe_regmap(pdev, desc, regmap); + } + + static struct platform_driver berlin4ct_pinctrl_driver = { +diff --git a/drivers/pinctrl/berlin/pinctrl-as370.c b/drivers/pinctrl/berlin/pinctrl-as370.c +index b631c14813a7dc..fc0daec94e1059 100644 +--- a/drivers/pinctrl/berlin/pinctrl-as370.c ++++ b/drivers/pinctrl/berlin/pinctrl-as370.c +@@ -8,8 +8,9 @@ + */ + + #include +-#include ++#include + #include ++#include + #include + + #include "berlin.h" +@@ -330,8 +331,8 @@ static const struct of_device_id as370_pinctrl_match[] = { + + static int as370_pinctrl_probe(struct platform_device *pdev) + { +- const struct of_device_id *match = +- of_match_device(as370_pinctrl_match, &pdev->dev); ++ const struct berlin_pinctrl_desc *desc = ++ device_get_match_data(&pdev->dev); + struct regmap_config *rmconfig; + struct regmap *regmap; + struct resource *res; +@@ -354,7 +355,7 @@ static int as370_pinctrl_probe(struct platform_device *pdev) + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + +- return berlin_pinctrl_probe_regmap(pdev, match->data, regmap); ++ return berlin_pinctrl_probe_regmap(pdev, desc, regmap); + } + + static struct platform_driver as370_pinctrl_driver = { +diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-38x.c b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c +index 040e418dbfc1be..162dfc213669a7 100644 +--- a/drivers/pinctrl/mvebu/pinctrl-armada-38x.c ++++ b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c +@@ -12,8 +12,8 @@ + #include + #include + #include +-#include + #include ++#include + + #include "pinctrl-mvebu.h" + +@@ -404,13 +404,8 @@ static struct pinctrl_gpio_range armada_38x_mpp_gpio_ranges[] = { + static int armada_38x_pinctrl_probe(struct platform_device *pdev) + { + struct mvebu_pinctrl_soc_info *soc = &armada_38x_pinctrl_info; +- const struct of_device_id *match = +- of_match_device(armada_38x_pinctrl_of_match, &pdev->dev); + +- if (!match) +- return -ENODEV; +- +- soc->variant = (unsigned) match->data & 0xff; ++ soc->variant = (unsigned)device_get_match_data(&pdev->dev) & 0xff; + soc->controls = armada_38x_mpp_controls; + soc->ncontrols = ARRAY_SIZE(armada_38x_mpp_controls); + soc->gpioranges = armada_38x_mpp_gpio_ranges; +diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-39x.c b/drivers/pinctrl/mvebu/pinctrl-armada-39x.c +index c33f1cbaf661aa..d9c98faa7b0e94 100644 +--- a/drivers/pinctrl/mvebu/pinctrl-armada-39x.c ++++ b/drivers/pinctrl/mvebu/pinctrl-armada-39x.c +@@ -12,8 +12,8 @@ + #include + #include + #include 
+-#include + #include ++#include + + #include "pinctrl-mvebu.h" + +@@ -386,13 +386,8 @@ static struct pinctrl_gpio_range armada_39x_mpp_gpio_ranges[] = { + static int armada_39x_pinctrl_probe(struct platform_device *pdev) + { + struct mvebu_pinctrl_soc_info *soc = &armada_39x_pinctrl_info; +- const struct of_device_id *match = +- of_match_device(armada_39x_pinctrl_of_match, &pdev->dev); + +- if (!match) +- return -ENODEV; +- +- soc->variant = (unsigned) match->data & 0xff; ++ soc->variant = (unsigned)device_get_match_data(&pdev->dev) & 0xff; + soc->controls = armada_39x_mpp_controls; + soc->ncontrols = ARRAY_SIZE(armada_39x_mpp_controls); + soc->gpioranges = armada_39x_mpp_gpio_ranges; +diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-ap806.c b/drivers/pinctrl/mvebu/pinctrl-armada-ap806.c +index 89bab536717df6..7becf2781a0b9f 100644 +--- a/drivers/pinctrl/mvebu/pinctrl-armada-ap806.c ++++ b/drivers/pinctrl/mvebu/pinctrl-armada-ap806.c +@@ -13,7 +13,6 @@ + #include + #include + #include +-#include + #include + + #include "pinctrl-mvebu.h" +@@ -106,10 +105,8 @@ static struct pinctrl_gpio_range armada_ap806_mpp_gpio_ranges[] = { + static int armada_ap806_pinctrl_probe(struct platform_device *pdev) + { + struct mvebu_pinctrl_soc_info *soc = &armada_ap806_pinctrl_info; +- const struct of_device_id *match = +- of_match_device(armada_ap806_pinctrl_of_match, &pdev->dev); + +- if (!match || !pdev->dev.parent) ++ if (!pdev->dev.parent) + return -ENODEV; + + soc->variant = 0; /* no variants for Armada AP806 */ +diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c b/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c +index 8ba8f3e9121f04..9a250c491f33d3 100644 +--- a/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c ++++ b/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c +@@ -12,9 +12,9 @@ + #include + #include + #include +-#include + #include + #include ++#include + + #include "pinctrl-mvebu.h" + +@@ -638,8 +638,6 @@ static void mvebu_pinctrl_assign_variant(struct mvebu_mpp_mode *m, + static int armada_cp110_pinctrl_probe(struct platform_device *pdev) + { + struct mvebu_pinctrl_soc_info *soc; +- const struct of_device_id *match = +- of_match_device(armada_cp110_pinctrl_of_match, &pdev->dev); + int i; + + if (!pdev->dev.parent) +@@ -650,7 +648,7 @@ static int armada_cp110_pinctrl_probe(struct platform_device *pdev) + if (!soc) + return -ENOMEM; + +- soc->variant = (unsigned long) match->data & 0xff; ++ soc->variant = (unsigned long)device_get_match_data(&pdev->dev) & 0xff; + soc->controls = armada_cp110_mpp_controls; + soc->ncontrols = ARRAY_SIZE(armada_cp110_mpp_controls); + soc->modes = armada_cp110_mpp_modes; +diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c +index 48e2a6c56a83b9..487825bfd125f3 100644 +--- a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c ++++ b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c +@@ -19,8 +19,8 @@ + #include + #include + #include +-#include + #include ++#include + #include + + #include "pinctrl-mvebu.h" +@@ -568,14 +568,9 @@ static int armada_xp_pinctrl_resume(struct platform_device *pdev) + static int armada_xp_pinctrl_probe(struct platform_device *pdev) + { + struct mvebu_pinctrl_soc_info *soc = &armada_xp_pinctrl_info; +- const struct of_device_id *match = +- of_match_device(armada_xp_pinctrl_of_match, &pdev->dev); + int nregs; + +- if (!match) +- return -ENODEV; +- +- soc->variant = (unsigned) match->data & 0xff; ++ soc->variant = (unsigned)device_get_match_data(&pdev->dev) & 0xff; + + switch (soc->variant) { + case V_MV78230: +diff 
--git a/drivers/pinctrl/mvebu/pinctrl-dove.c b/drivers/pinctrl/mvebu/pinctrl-dove.c +index bd74daa9ed6663..dce601d993728c 100644 +--- a/drivers/pinctrl/mvebu/pinctrl-dove.c ++++ b/drivers/pinctrl/mvebu/pinctrl-dove.c +@@ -12,9 +12,9 @@ + #include + #include + #include +-#include + #include + #include ++#include + #include + + #include "pinctrl-mvebu.h" +@@ -765,13 +765,11 @@ static int dove_pinctrl_probe(struct platform_device *pdev) + { + struct resource *res, *mpp_res; + struct resource fb_res; +- const struct of_device_id *match = +- of_match_device(dove_pinctrl_of_match, &pdev->dev); + struct mvebu_mpp_ctrl_data *mpp_data; + void __iomem *base; +- int i; ++ int i, ret; + +- pdev->dev.platform_data = (void *)match->data; ++ pdev->dev.platform_data = (void *)device_get_match_data(&pdev->dev); + + /* + * General MPP Configuration Register is part of pdma registers. +@@ -785,13 +783,17 @@ static int dove_pinctrl_probe(struct platform_device *pdev) + clk_prepare_enable(clk); + + base = devm_platform_get_and_ioremap_resource(pdev, 0, &mpp_res); +- if (IS_ERR(base)) +- return PTR_ERR(base); ++ if (IS_ERR(base)) { ++ ret = PTR_ERR(base); ++ goto err_probe; ++ } + + mpp_data = devm_kcalloc(&pdev->dev, dove_pinctrl_info.ncontrols, + sizeof(*mpp_data), GFP_KERNEL); +- if (!mpp_data) +- return -ENOMEM; ++ if (!mpp_data) { ++ ret = -ENOMEM; ++ goto err_probe; ++ } + + dove_pinctrl_info.control_data = mpp_data; + for (i = 0; i < ARRAY_SIZE(dove_mpp_controls); i++) +@@ -810,8 +812,10 @@ static int dove_pinctrl_probe(struct platform_device *pdev) + } + + mpp4_base = devm_ioremap_resource(&pdev->dev, res); +- if (IS_ERR(mpp4_base)) +- return PTR_ERR(mpp4_base); ++ if (IS_ERR(mpp4_base)) { ++ ret = PTR_ERR(mpp4_base); ++ goto err_probe; ++ } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 2); + if (!res) { +@@ -822,8 +826,10 @@ static int dove_pinctrl_probe(struct platform_device *pdev) + } + + pmu_base = devm_ioremap_resource(&pdev->dev, res); +- if (IS_ERR(pmu_base)) +- return PTR_ERR(pmu_base); ++ if (IS_ERR(pmu_base)) { ++ ret = PTR_ERR(pmu_base); ++ goto err_probe; ++ } + + gconfmap = syscon_regmap_lookup_by_compatible("marvell,dove-global-config"); + if (IS_ERR(gconfmap)) { +@@ -833,12 +839,17 @@ static int dove_pinctrl_probe(struct platform_device *pdev) + adjust_resource(&fb_res, + (mpp_res->start & INT_REGS_MASK) + GC_REGS_OFFS, 0x14); + gc_base = devm_ioremap_resource(&pdev->dev, &fb_res); +- if (IS_ERR(gc_base)) +- return PTR_ERR(gc_base); ++ if (IS_ERR(gc_base)) { ++ ret = PTR_ERR(gc_base); ++ goto err_probe; ++ } ++ + gconfmap = devm_regmap_init_mmio(&pdev->dev, + gc_base, &gc_regmap_config); +- if (IS_ERR(gconfmap)) +- return PTR_ERR(gconfmap); ++ if (IS_ERR(gconfmap)) { ++ ret = PTR_ERR(gconfmap); ++ goto err_probe; ++ } + } + + /* Warn on any missing DT resource */ +@@ -846,6 +857,9 @@ static int dove_pinctrl_probe(struct platform_device *pdev) + dev_warn(&pdev->dev, FW_BUG "Missing pinctrl regs in DTB. 
Please update your firmware.\n"); + + return mvebu_pinctrl_probe(pdev); ++err_probe: ++ clk_disable_unprepare(clk); ++ return ret; + } + + static struct platform_driver dove_pinctrl_driver = { +diff --git a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c +index d45c31f281c856..4789d7442f788e 100644 +--- a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c ++++ b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c +@@ -11,8 +11,8 @@ + #include + #include + #include +-#include + #include ++#include + + #include "pinctrl-mvebu.h" + +@@ -470,10 +470,7 @@ static const struct of_device_id kirkwood_pinctrl_of_match[] = { + + static int kirkwood_pinctrl_probe(struct platform_device *pdev) + { +- const struct of_device_id *match = +- of_match_device(kirkwood_pinctrl_of_match, &pdev->dev); +- +- pdev->dev.platform_data = (void *)match->data; ++ pdev->dev.platform_data = (void *)device_get_match_data(&pdev->dev); + + return mvebu_pinctrl_simple_mmio_probe(pdev); + } +diff --git a/drivers/pinctrl/mvebu/pinctrl-orion.c b/drivers/pinctrl/mvebu/pinctrl-orion.c +index cc97d270be61b4..2b6ab7f2afc781 100644 +--- a/drivers/pinctrl/mvebu/pinctrl-orion.c ++++ b/drivers/pinctrl/mvebu/pinctrl-orion.c +@@ -19,8 +19,8 @@ + #include + #include + #include +-#include + #include ++#include + + #include "pinctrl-mvebu.h" + +@@ -218,10 +218,7 @@ static const struct of_device_id orion_pinctrl_of_match[] = { + + static int orion_pinctrl_probe(struct platform_device *pdev) + { +- const struct of_device_id *match = +- of_match_device(orion_pinctrl_of_match, &pdev->dev); +- +- pdev->dev.platform_data = (void*)match->data; ++ pdev->dev.platform_data = (void*)device_get_match_data(&pdev->dev); + + mpp_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(mpp_base)) +diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c +index 6b90051af20674..0cfa74365733ca 100644 +--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c ++++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c +@@ -17,6 +17,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -985,7 +986,6 @@ static const struct of_device_id abx500_gpio_match[] = { + static int abx500_gpio_probe(struct platform_device *pdev) + { + struct device_node *np = pdev->dev.of_node; +- const struct of_device_id *match; + struct abx500_pinctrl *pct; + unsigned int id = -1; + int ret; +@@ -1006,12 +1006,7 @@ static int abx500_gpio_probe(struct platform_device *pdev) + pct->chip.parent = &pdev->dev; + pct->chip.base = -1; /* Dynamic allocation */ + +- match = of_match_device(abx500_gpio_match, &pdev->dev); +- if (!match) { +- dev_err(&pdev->dev, "gpio dt not matching\n"); +- return -ENODEV; +- } +- id = (unsigned long)match->data; ++ id = (unsigned long)device_get_match_data(&pdev->dev); + + /* Poke in other ASIC variants here */ + switch (id) { +diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c +index e7d33093994b2a..445c61a4a7e553 100644 +--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c ++++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c +@@ -16,9 +16,11 @@ + #include + #include + #include ++#include + #include +-#include ++#include + #include ++#include + #include + #include + #include +@@ -1840,7 +1842,6 @@ static int nmk_pinctrl_resume(struct device *dev) + + static int nmk_pinctrl_probe(struct platform_device *pdev) + { +- const struct of_device_id *match; + struct device_node *np = pdev->dev.of_node; + struct device_node *prcm_np; + struct nmk_pinctrl *npct; 
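The pinctrl hunks above all make the same mechanical conversion: the of_match_device() table lookup and its failure check are replaced by device_get_match_data(), which hands back the matched entry's .data pointer directly. A minimal sketch of the resulting probe shape, using hypothetical foo_* names rather than any driver touched by this series:

#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct foo_variant {
	unsigned int flags;	/* per-compatible configuration */
};

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_variant *variant;

	/* replaces of_match_device(foo_of_match, &pdev->dev) plus match->data */
	variant = device_get_match_data(&pdev->dev);
	if (!variant)
		return -EINVAL;

	dev_info(&pdev->dev, "variant flags %#x\n", variant->flags);
	return 0;
}

Drivers that encode a plain integer in .data, as in the armada and nomadik hunks, cast the returned pointer instead of dereferencing it.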
+@@ -1851,10 +1852,7 @@ static int nmk_pinctrl_probe(struct platform_device *pdev) + if (!npct) + return -ENOMEM; + +- match = of_match_device(nmk_pinctrl_match, &pdev->dev); +- if (!match) +- return -ENODEV; +- version = (unsigned int) match->data; ++ version = (unsigned int)device_get_match_data(&pdev->dev); + + /* Poke in other ASIC variants here */ + if (version == PINCTRL_NMK_STN8815) +diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c +index ad30fd47a4bb0b..d7b66928a4e50d 100644 +--- a/drivers/pinctrl/pinctrl-at91.c ++++ b/drivers/pinctrl/pinctrl-at91.c +@@ -12,10 +12,9 @@ + #include + #include + #include +-#include +-#include +-#include ++#include + #include ++#include + #include + #include + #include +@@ -1302,8 +1301,8 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev, + if (!np) + return -ENODEV; + +- info->dev = dev; +- info->ops = of_device_get_match_data(dev); ++ info->dev = &pdev->dev; ++ info->ops = device_get_match_data(&pdev->dev); + at91_pinctrl_child_count(info, np); + + /* +@@ -1848,7 +1847,7 @@ static int at91_gpio_probe(struct platform_device *pdev) + if (IS_ERR(at91_chip->regbase)) + return PTR_ERR(at91_chip->regbase); + +- at91_chip->ops = of_device_get_match_data(dev); ++ at91_chip->ops = device_get_match_data(dev); + at91_chip->pioc_virq = irq; + + at91_chip->clock = devm_clk_get_enabled(dev, NULL); +diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c +index bbe7cc894b1a17..6c670203b3ac28 100644 +--- a/drivers/pinctrl/pinctrl-single.c ++++ b/drivers/pinctrl/pinctrl-single.c +@@ -1918,7 +1918,8 @@ static int pcs_probe(struct platform_device *pdev) + + dev_info(pcs->dev, "%i pins, size %u\n", pcs->desc.npins, pcs->size); + +- if (pinctrl_enable(pcs->pctl)) ++ ret = pinctrl_enable(pcs->pctl); ++ if (ret) + goto free; + + return 0; +diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c +index cf0383f575d9c9..f4256a918165f4 100644 +--- a/drivers/pinctrl/pinctrl-xway.c ++++ b/drivers/pinctrl/pinctrl-xway.c +@@ -11,12 +11,12 @@ + #include + #include + #include +-#include +-#include ++#include + #include + #include + #include + #include ++#include + + #include "pinctrl-lantiq.h" + +@@ -1451,7 +1451,6 @@ MODULE_DEVICE_TABLE(of, xway_match); + + static int pinmux_xway_probe(struct platform_device *pdev) + { +- const struct of_device_id *match; + const struct pinctrl_xway_soc *xway_soc; + int ret, i; + +@@ -1460,10 +1459,8 @@ static int pinmux_xway_probe(struct platform_device *pdev) + if (IS_ERR(xway_info.membase[0])) + return PTR_ERR(xway_info.membase[0]); + +- match = of_match_device(xway_match, &pdev->dev); +- if (match) +- xway_soc = (const struct pinctrl_xway_soc *) match->data; +- else ++ xway_soc = device_get_match_data(&pdev->dev); ++ if (!xway_soc) + xway_soc = &danube_pinctrl; + + /* find out how many pads we have */ +diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c +index 5370bbdf2e1a1e..451801acdc4038 100644 +--- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c ++++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c +@@ -14,7 +14,8 @@ + #include + #include + #include +-#include ++#include ++#include + #include + #include + #include +@@ -272,6 +273,22 @@ static int ti_iodelay_pinconf_set(struct ti_iodelay_device *iod, + return r; + } + ++/** ++ * ti_iodelay_pinconf_deinit_dev() - deinit the iodelay device ++ * @data: IODelay device ++ * ++ * Deinitialize the IODelay device (basically just lock the region back up. 
++ */ ++static void ti_iodelay_pinconf_deinit_dev(void *data) ++{ ++ struct ti_iodelay_device *iod = data; ++ const struct ti_iodelay_reg_data *reg = iod->reg_data; ++ ++ /* lock the iodelay region back again */ ++ regmap_update_bits(iod->regmap, reg->reg_global_lock_offset, ++ reg->global_lock_mask, reg->global_lock_val); ++} ++ + /** + * ti_iodelay_pinconf_init_dev() - Initialize IODelay device + * @iod: iodelay device +@@ -294,6 +311,11 @@ static int ti_iodelay_pinconf_init_dev(struct ti_iodelay_device *iod) + if (r) + return r; + ++ r = devm_add_action_or_reset(iod->dev, ti_iodelay_pinconf_deinit_dev, ++ iod); ++ if (r) ++ return r; ++ + /* Read up Recalibration sequence done by bootloader */ + r = regmap_read(iod->regmap, reg->reg_refclk_offset, &val); + if (r) +@@ -352,21 +374,6 @@ static int ti_iodelay_pinconf_init_dev(struct ti_iodelay_device *iod) + return 0; + } + +-/** +- * ti_iodelay_pinconf_deinit_dev() - deinit the iodelay device +- * @iod: IODelay device +- * +- * Deinitialize the IODelay device (basically just lock the region back up. +- */ +-static void ti_iodelay_pinconf_deinit_dev(struct ti_iodelay_device *iod) +-{ +- const struct ti_iodelay_reg_data *reg = iod->reg_data; +- +- /* lock the iodelay region back again */ +- regmap_update_bits(iod->regmap, reg->reg_global_lock_offset, +- reg->global_lock_mask, reg->global_lock_val); +-} +- + /** + * ti_iodelay_get_pingroup() - Find the group mapped by a group selector + * @iod: iodelay device +@@ -821,56 +828,48 @@ MODULE_DEVICE_TABLE(of, ti_iodelay_of_match); + static int ti_iodelay_probe(struct platform_device *pdev) + { + struct device *dev = &pdev->dev; +- struct device_node *np = of_node_get(dev->of_node); +- const struct of_device_id *match; ++ struct device_node *np __free(device_node) = of_node_get(dev->of_node); + struct resource *res; + struct ti_iodelay_device *iod; +- int ret = 0; ++ int ret; + + if (!np) { +- ret = -EINVAL; + dev_err(dev, "No OF node\n"); +- goto exit_out; +- } +- +- match = of_match_device(ti_iodelay_of_match, dev); +- if (!match) { +- ret = -EINVAL; +- dev_err(dev, "No DATA match\n"); +- goto exit_out; ++ return -EINVAL; + } + + iod = devm_kzalloc(dev, sizeof(*iod), GFP_KERNEL); +- if (!iod) { +- ret = -ENOMEM; +- goto exit_out; +- } ++ if (!iod) ++ return -ENOMEM; ++ + iod->dev = dev; +- iod->reg_data = match->data; ++ iod->reg_data = device_get_match_data(dev); ++ if (!iod->reg_data) { ++ dev_err(dev, "No DATA match\n"); ++ return -EINVAL; ++ } + + /* So far We can assume there is only 1 bank of registers */ + iod->reg_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); +- if (IS_ERR(iod->reg_base)) { +- ret = PTR_ERR(iod->reg_base); +- goto exit_out; +- } ++ if (IS_ERR(iod->reg_base)) ++ return PTR_ERR(iod->reg_base); ++ + iod->phys_base = res->start; + + iod->regmap = devm_regmap_init_mmio(dev, iod->reg_base, + iod->reg_data->regmap_config); + if (IS_ERR(iod->regmap)) { + dev_err(dev, "Regmap MMIO init failed.\n"); +- ret = PTR_ERR(iod->regmap); +- goto exit_out; ++ return PTR_ERR(iod->regmap); + } + + ret = ti_iodelay_pinconf_init_dev(iod); + if (ret) +- goto exit_out; ++ return ret; + + ret = ti_iodelay_alloc_pins(dev, iod, res->start); + if (ret) +- goto exit_out; ++ return ret; + + iod->desc.pctlops = &ti_iodelay_pinctrl_ops; + /* no pinmux ops - we are pinconf */ +@@ -881,42 +880,14 @@ static int ti_iodelay_probe(struct platform_device *pdev) + ret = devm_pinctrl_register_and_init(dev, &iod->desc, iod, &iod->pctl); + if (ret) { + dev_err(dev, "Failed to register pinctrl\n"); 
+- goto exit_out; ++ return ret; + } + +- platform_set_drvdata(pdev, iod); +- +- ret = pinctrl_enable(iod->pctl); +- if (ret) +- goto exit_out; +- +- return 0; +- +-exit_out: +- of_node_put(np); +- return ret; +-} +- +-/** +- * ti_iodelay_remove() - standard remove +- * @pdev: platform device +- * +- * Return: 0 if all went fine, else appropriate error value. +- */ +-static int ti_iodelay_remove(struct platform_device *pdev) +-{ +- struct ti_iodelay_device *iod = platform_get_drvdata(pdev); +- +- ti_iodelay_pinconf_deinit_dev(iod); +- +- /* Expect other allocations to be freed by devm */ +- +- return 0; ++ return pinctrl_enable(iod->pctl); + } + + static struct platform_driver ti_iodelay_driver = { + .probe = ti_iodelay_probe, +- .remove = ti_iodelay_remove, + .driver = { + .name = DRIVER_NAME, + .of_match_table = ti_iodelay_of_match, +diff --git a/drivers/power/supply/axp20x_battery.c b/drivers/power/supply/axp20x_battery.c +index 6ac5c80cfda214..7520b599eb3d17 100644 +--- a/drivers/power/supply/axp20x_battery.c ++++ b/drivers/power/supply/axp20x_battery.c +@@ -303,11 +303,11 @@ static int axp20x_battery_get_prop(struct power_supply *psy, + val->intval = reg & AXP209_FG_PERCENT; + break; + +- case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: ++ case POWER_SUPPLY_PROP_VOLTAGE_MAX: + return axp20x_batt->data->get_max_voltage(axp20x_batt, + &val->intval); + +- case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: ++ case POWER_SUPPLY_PROP_VOLTAGE_MIN: + ret = regmap_read(axp20x_batt->regmap, AXP20X_V_OFF, ®); + if (ret) + return ret; +@@ -455,10 +455,10 @@ static int axp20x_battery_set_prop(struct power_supply *psy, + struct axp20x_batt_ps *axp20x_batt = power_supply_get_drvdata(psy); + + switch (psp) { +- case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: ++ case POWER_SUPPLY_PROP_VOLTAGE_MIN: + return axp20x_set_voltage_min_design(axp20x_batt, val->intval); + +- case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: ++ case POWER_SUPPLY_PROP_VOLTAGE_MAX: + return axp20x_batt->data->set_max_voltage(axp20x_batt, val->intval); + + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: +@@ -493,8 +493,8 @@ static enum power_supply_property axp20x_battery_props[] = { + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, + POWER_SUPPLY_PROP_HEALTH, +- POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, +- POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, ++ POWER_SUPPLY_PROP_VOLTAGE_MAX, ++ POWER_SUPPLY_PROP_VOLTAGE_MIN, + POWER_SUPPLY_PROP_CAPACITY, + }; + +@@ -502,8 +502,8 @@ static int axp20x_battery_prop_writeable(struct power_supply *psy, + enum power_supply_property psp) + { + return psp == POWER_SUPPLY_PROP_STATUS || +- psp == POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN || +- psp == POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN || ++ psp == POWER_SUPPLY_PROP_VOLTAGE_MIN || ++ psp == POWER_SUPPLY_PROP_VOLTAGE_MAX || + psp == POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT || + psp == POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX; + } +diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c +index 17ac2ab78c4e4c..ab97dd7ca5cb69 100644 +--- a/drivers/power/supply/max17042_battery.c ++++ b/drivers/power/supply/max17042_battery.c +@@ -853,7 +853,10 @@ static void max17042_set_soc_threshold(struct max17042_chip *chip, u16 off) + /* program interrupt thresholds such that we should + * get interrupt for every 'off' perc change in the soc + */ +- regmap_read(map, MAX17042_RepSOC, &soc); ++ if (chip->pdata->enable_current_sense) ++ regmap_read(map, MAX17042_RepSOC, &soc); ++ else ++ regmap_read(map, 
MAX17042_VFSOC, &soc); + soc >>= 8; + soc_tr = (soc + off) << 8; + if (off < soc) +diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c +index 3dfe45ac300aa9..f1de4111e98d9d 100644 +--- a/drivers/powercap/intel_rapl_common.c ++++ b/drivers/powercap/intel_rapl_common.c +@@ -738,7 +738,7 @@ static struct rapl_primitive_info *get_rpi(struct rapl_package *rp, int prim) + { + struct rapl_primitive_info *rpi = rp->priv->rpi; + +- if (prim < 0 || prim > NR_RAPL_PRIMITIVES || !rpi) ++ if (prim < 0 || prim >= NR_RAPL_PRIMITIVES || !rpi) + return NULL; + + return &rpi[prim]; +diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c +index 42f93d4c6ee329..53e9c304ae0a7a 100644 +--- a/drivers/pps/clients/pps_parport.c ++++ b/drivers/pps/clients/pps_parport.c +@@ -148,7 +148,10 @@ static void parport_attach(struct parport *port) + return; + } + +- index = ida_simple_get(&pps_client_index, 0, 0, GFP_KERNEL); ++ index = ida_alloc(&pps_client_index, GFP_KERNEL); ++ if (index < 0) ++ goto err_free_device; ++ + memset(&pps_client_cb, 0, sizeof(pps_client_cb)); + pps_client_cb.private = device; + pps_client_cb.irq_func = parport_irq; +@@ -159,7 +162,7 @@ static void parport_attach(struct parport *port) + index); + if (!device->pardev) { + pr_err("couldn't register with %s\n", port->name); +- goto err_free; ++ goto err_free_ida; + } + + if (parport_claim_or_block(device->pardev) < 0) { +@@ -187,8 +190,9 @@ static void parport_attach(struct parport *port) + parport_release(device->pardev); + err_unregister_dev: + parport_unregister_device(device->pardev); +-err_free: +- ida_simple_remove(&pps_client_index, index); ++err_free_ida: ++ ida_free(&pps_client_index, index); ++err_free_device: + kfree(device); + } + +@@ -208,7 +212,7 @@ static void parport_detach(struct parport *port) + pps_unregister_source(device->pps); + parport_release(pardev); + parport_unregister_device(pardev); +- ida_simple_remove(&pps_client_index, device->index); ++ ida_free(&pps_client_index, device->index); + kfree(device); + } + +diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c +index 1b65e5e4e40ffc..59e71fd0db4390 100644 +--- a/drivers/regulator/of_regulator.c ++++ b/drivers/regulator/of_regulator.c +@@ -768,7 +768,7 @@ int of_regulator_bulk_get_all(struct device *dev, struct device_node *np, + name[i] = '\0'; + tmp = regulator_get(dev, name); + if (IS_ERR(tmp)) { +- ret = -EINVAL; ++ ret = PTR_ERR(tmp); + goto error; + } + (*consumers)[n].consumer = tmp; +diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c +index cfee164dd645ca..d68d4b22f528e2 100644 +--- a/drivers/remoteproc/imx_rproc.c ++++ b/drivers/remoteproc/imx_rproc.c +@@ -213,7 +213,7 @@ static const struct imx_rproc_att imx_rproc_att_imx8mq[] = { + /* QSPI Code - alias */ + { 0x08000000, 0x08000000, 0x08000000, 0 }, + /* DDR (Code) - alias */ +- { 0x10000000, 0x80000000, 0x0FFE0000, 0 }, ++ { 0x10000000, 0x40000000, 0x0FFE0000, 0 }, + /* TCML */ + { 0x1FFE0000, 0x007E0000, 0x00020000, ATT_OWN | ATT_IOMEM}, + /* TCMU */ +@@ -1134,6 +1134,8 @@ static int imx_rproc_probe(struct platform_device *pdev) + goto err_put_rproc; + } + ++ INIT_WORK(&priv->rproc_work, imx_rproc_vq_work); ++ + ret = imx_rproc_xtr_mbox_init(rproc); + if (ret) + goto err_put_wkq; +@@ -1152,8 +1154,6 @@ static int imx_rproc_probe(struct platform_device *pdev) + if (ret) + goto err_put_scu; + +- INIT_WORK(&priv->rproc_work, imx_rproc_vq_work); +- + if (rproc->state != RPROC_DETACHED) + 
rproc->auto_boot = of_property_read_bool(np, "fsl,auto-boot"); + +diff --git a/drivers/reset/reset-berlin.c b/drivers/reset/reset-berlin.c +index 2537ec05eceefd..578fe867080ce0 100644 +--- a/drivers/reset/reset-berlin.c ++++ b/drivers/reset/reset-berlin.c +@@ -68,13 +68,14 @@ static int berlin_reset_xlate(struct reset_controller_dev *rcdev, + + static int berlin2_reset_probe(struct platform_device *pdev) + { +- struct device_node *parent_np = of_get_parent(pdev->dev.of_node); ++ struct device_node *parent_np; + struct berlin_reset_priv *priv; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + ++ parent_np = of_get_parent(pdev->dev.of_node); + priv->regmap = syscon_node_to_regmap(parent_np); + of_node_put(parent_np); + if (IS_ERR(priv->regmap)) +diff --git a/drivers/reset/reset-k210.c b/drivers/reset/reset-k210.c +index b62a2fd44e4e42..e77e4cca377dca 100644 +--- a/drivers/reset/reset-k210.c ++++ b/drivers/reset/reset-k210.c +@@ -90,7 +90,7 @@ static const struct reset_control_ops k210_rst_ops = { + static int k210_rst_probe(struct platform_device *pdev) + { + struct device *dev = &pdev->dev; +- struct device_node *parent_np = of_get_parent(dev->of_node); ++ struct device_node *parent_np; + struct k210_rst *ksr; + + dev_info(dev, "K210 reset controller\n"); +@@ -99,6 +99,7 @@ static int k210_rst_probe(struct platform_device *pdev) + if (!ksr) + return -ENOMEM; + ++ parent_np = of_get_parent(dev->of_node); + ksr->map = syscon_node_to_regmap(parent_np); + of_node_put(parent_np); + if (IS_ERR(ksr->map)) +diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c +index cea3a79d538e4b..00e245173320c3 100644 +--- a/drivers/scsi/NCR5380.c ++++ b/drivers/scsi/NCR5380.c +@@ -1485,6 +1485,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, + unsigned char **data) + { + struct NCR5380_hostdata *hostdata = shost_priv(instance); ++ struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(hostdata->connected); + int c = *count; + unsigned char p = *phase; + unsigned char *d = *data; +@@ -1496,7 +1497,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, + return -1; + } + +- NCR5380_to_ncmd(hostdata->connected)->phase = p; ++ ncmd->phase = p; + + if (p & SR_IO) { + if (hostdata->read_overruns) +@@ -1608,45 +1609,44 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, + * request. + */ + +- if (hostdata->flags & FLAG_DMA_FIXUP) { +- if (p & SR_IO) { +- /* +- * The workaround was to transfer fewer bytes than we +- * intended to with the pseudo-DMA read function, wait for +- * the chip to latch the last byte, read it, and then disable +- * pseudo-DMA mode. +- * +- * After REQ is asserted, the NCR5380 asserts DRQ and ACK. +- * REQ is deasserted when ACK is asserted, and not reasserted +- * until ACK goes false. Since the NCR5380 won't lower ACK +- * until DACK is asserted, which won't happen unless we twiddle +- * the DMA port or we take the NCR5380 out of DMA mode, we +- * can guarantee that we won't handshake another extra +- * byte. +- */ +- +- if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, +- BASR_DRQ, BASR_DRQ, 0) < 0) { +- result = -1; +- shost_printk(KERN_ERR, instance, "PDMA read: DRQ timeout\n"); +- } +- if (NCR5380_poll_politely(hostdata, STATUS_REG, +- SR_REQ, 0, 0) < 0) { +- result = -1; +- shost_printk(KERN_ERR, instance, "PDMA read: !REQ timeout\n"); +- } +- d[*count - 1] = NCR5380_read(INPUT_DATA_REG); +- } else { +- /* +- * Wait for the last byte to be sent. 
If REQ is being asserted for
+- * the byte we're interested, we'll ACK it and it will go false.
+- */
+- if (NCR5380_poll_politely2(hostdata,
+- BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ,
+- BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, 0) < 0) {
+- result = -1;
+- shost_printk(KERN_ERR, instance, "PDMA write: DRQ and phase timeout\n");
++ if ((hostdata->flags & FLAG_DMA_FIXUP) &&
++ (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) {
++ /*
++ * The workaround was to transfer fewer bytes than we
++ * intended to with the pseudo-DMA receive function, wait for
++ * the chip to latch the last byte, read it, and then disable
++ * DMA mode.
++ *
++ * After REQ is asserted, the NCR5380 asserts DRQ and ACK.
++ * REQ is deasserted when ACK is asserted, and not reasserted
++ * until ACK goes false. Since the NCR5380 won't lower ACK
++ * until DACK is asserted, which won't happen unless we twiddle
++ * the DMA port or we take the NCR5380 out of DMA mode, we
++ * can guarantee that we won't handshake another extra
++ * byte.
++ *
++ * If sending, wait for the last byte to be sent. If REQ is
++ * being asserted for the byte we're interested in, we'll ACK it
++ * and it will go false.
++ */
++ if (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
++ BASR_DRQ, BASR_DRQ, 0)) {
++ if ((p & SR_IO) &&
++ (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) {
++ if (!NCR5380_poll_politely(hostdata, STATUS_REG,
++ SR_REQ, 0, 0)) {
++ d[c] = NCR5380_read(INPUT_DATA_REG);
++ --ncmd->this_residual;
++ } else {
++ result = -1;
++ scmd_printk(KERN_ERR, hostdata->connected,
++ "PDMA fixup: !REQ timeout\n");
++ }
+ }
++ } else if (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH) {
++ result = -1;
++ scmd_printk(KERN_ERR, hostdata->connected,
++ "PDMA fixup: DRQ timeout\n");
+ }
+ }
+
+diff --git a/drivers/scsi/elx/libefc/efc_nport.c b/drivers/scsi/elx/libefc/efc_nport.c
+index 2e83a667901fec..1a7437f4328e87 100644
+--- a/drivers/scsi/elx/libefc/efc_nport.c
++++ b/drivers/scsi/elx/libefc/efc_nport.c
+@@ -705,9 +705,9 @@ efc_nport_vport_del(struct efc *efc, struct efc_domain *domain,
+ spin_lock_irqsave(&efc->lock, flags);
+ list_for_each_entry(nport, &domain->nport_list, list_entry) {
+ if (nport->wwpn == wwpn && nport->wwnn == wwnn) {
+- kref_put(&nport->ref, nport->release);
+ /* Shutdown this NPORT */
+ efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL);
++ kref_put(&nport->ref, nport->release);
+ break;
+ }
+ }
+diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
+index 2e511697fce3e9..2c88ce24d19a7a 100644
+--- a/drivers/scsi/mac_scsi.c
++++ b/drivers/scsi/mac_scsi.c
+@@ -102,11 +102,15 @@ __setup("mac5380=", mac_scsi_setup);
+ * Linux SCSI drivers lack knowledge of the timing behaviour of SCSI targets
+ * so bus errors are unavoidable.
+ *
+- * If a MOVE.B instruction faults, we assume that zero bytes were transferred
+- * and simply retry. That assumption probably depends on target behaviour but
+- * seems to hold up okay. The NOP provides synchronization: without it the
+- * fault can sometimes occur after the program counter has moved past the
+- * offending instruction. Post-increment addressing can't be used.
++ * If a MOVE.B instruction faults during a receive operation, we assume the
++ * target sent nothing and try again. That assumption probably depends on
++ * target firmware but it seems to hold up okay. If a fault happens during a
++ * send operation, the target may or may not have seen /ACK and got the byte.
++ * It's uncertain so the whole SCSI command gets retried.
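The replacement comment above pins down the retry policy that the reworked transfer helpers implement: a fault before any byte moves means the chunk can simply be retried, while a fault mid-stream leaves the handshake state unknown and forces the whole SCSI command to be retried. A rough restatement as a hypothetical helper, with invented names and not the driver's actual caller:

#include <linux/errno.h>
#include <linux/types.h>

/*
 * Sketch only: bytes < 0 models the negated return that flags a bus
 * fault in mac_pdma_send()/mac_pdma_recv() above.
 */
static int classify_pdma_result(int bytes, bool sending)
{
	if (bytes >= 0)
		return bytes;	/* definite count, carry on with the chunk */
	if (!sending)
		return 0;	/* receive fault: target sent nothing, retry chunk */
	return -EIO;		/* send fault: /ACK state uncertain, retry command */
}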
++ * ++ * The NOP is needed for synchronization because the fault address in the ++ * exception stack frame may or may not be the instruction that actually ++ * caused the bus error. Post-increment addressing can't be used. + */ + + #define MOVE_BYTE(operands) \ +@@ -208,8 +212,6 @@ __setup("mac5380=", mac_scsi_setup); + ".previous \n" \ + : "+a" (addr), "+r" (n), "+r" (result) : "a" (io)) + +-#define MAC_PDMA_DELAY 32 +- + static inline int mac_pdma_recv(void __iomem *io, unsigned char *start, int n) + { + unsigned char *addr = start; +@@ -245,22 +247,21 @@ static inline int mac_pdma_send(unsigned char *start, void __iomem *io, int n) + if (n >= 1) { + MOVE_BYTE("%0@,%3@"); + if (result) +- goto out; ++ return -1; + } + if (n >= 1 && ((unsigned long)addr & 1)) { + MOVE_BYTE("%0@,%3@"); + if (result) +- goto out; ++ return -2; + } + while (n >= 32) + MOVE_16_WORDS("%0@+,%3@"); + while (n >= 2) + MOVE_WORD("%0@+,%3@"); + if (result) +- return start - addr; /* Negated to indicate uncertain length */ ++ return start - addr - 1; /* Negated to indicate uncertain length */ + if (n == 1) + MOVE_BYTE("%0@,%3@"); +-out: + return addr - start; + } + +@@ -274,25 +275,56 @@ static inline void write_ctrl_reg(struct NCR5380_hostdata *hostdata, u32 value) + out_be32(hostdata->io + (CTRL_REG << 4), value); + } + ++static inline int macscsi_wait_for_drq(struct NCR5380_hostdata *hostdata) ++{ ++ unsigned int n = 1; /* effectively multiplies NCR5380_REG_POLL_TIME */ ++ unsigned char basr; ++ ++again: ++ basr = NCR5380_read(BUS_AND_STATUS_REG); ++ ++ if (!(basr & BASR_PHASE_MATCH)) ++ return 1; ++ ++ if (basr & BASR_IRQ) ++ return -1; ++ ++ if (basr & BASR_DRQ) ++ return 0; ++ ++ if (n-- == 0) { ++ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); ++ dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, ++ "%s: DRQ timeout\n", __func__); ++ return -1; ++ } ++ ++ NCR5380_poll_politely2(hostdata, ++ BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ, ++ BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, 0); ++ goto again; ++} ++ + static inline int macscsi_pread(struct NCR5380_hostdata *hostdata, + unsigned char *dst, int len) + { + u8 __iomem *s = hostdata->pdma_io + (INPUT_DATA_REG << 4); + unsigned char *d = dst; +- int result = 0; + + hostdata->pdma_residual = len; + +- while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, +- BASR_DRQ | BASR_PHASE_MATCH, +- BASR_DRQ | BASR_PHASE_MATCH, 0)) { +- int bytes; ++ while (macscsi_wait_for_drq(hostdata) == 0) { ++ int bytes, chunk_bytes; + + if (macintosh_config->ident == MAC_MODEL_IIFX) + write_ctrl_reg(hostdata, CTRL_HANDSHAKE_MODE | + CTRL_INTERRUPTS_ENABLE); + +- bytes = mac_pdma_recv(s, d, min(hostdata->pdma_residual, 512)); ++ chunk_bytes = min(hostdata->pdma_residual, 512); ++ bytes = mac_pdma_recv(s, d, chunk_bytes); ++ ++ if (macintosh_config->ident == MAC_MODEL_IIFX) ++ write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE); + + if (bytes > 0) { + d += bytes; +@@ -300,37 +332,25 @@ static inline int macscsi_pread(struct NCR5380_hostdata *hostdata, + } + + if (hostdata->pdma_residual == 0) +- goto out; ++ break; + +- if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ, +- BUS_AND_STATUS_REG, BASR_ACK, +- BASR_ACK, 0) < 0) +- scmd_printk(KERN_DEBUG, hostdata->connected, +- "%s: !REQ and !ACK\n", __func__); +- if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) +- goto out; ++ if (bytes > 0) ++ continue; + +- if (bytes == 0) +- udelay(MAC_PDMA_DELAY); ++ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); ++ dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, ++ "%s: bus error 
[%d/%d] (%d/%d)\n", ++ __func__, d - dst, len, bytes, chunk_bytes); + +- if (bytes >= 0) ++ if (bytes == 0) + continue; + +- dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, +- "%s: bus error (%d/%d)\n", __func__, d - dst, len); +- NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); +- result = -1; +- goto out; ++ if (macscsi_wait_for_drq(hostdata) <= 0) ++ set_host_byte(hostdata->connected, DID_ERROR); ++ break; + } + +- scmd_printk(KERN_ERR, hostdata->connected, +- "%s: phase mismatch or !DRQ\n", __func__); +- NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); +- result = -1; +-out: +- if (macintosh_config->ident == MAC_MODEL_IIFX) +- write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE); +- return result; ++ return 0; + } + + static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata, +@@ -338,67 +358,47 @@ static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata, + { + unsigned char *s = src; + u8 __iomem *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4); +- int result = 0; + + hostdata->pdma_residual = len; + +- while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, +- BASR_DRQ | BASR_PHASE_MATCH, +- BASR_DRQ | BASR_PHASE_MATCH, 0)) { +- int bytes; ++ while (macscsi_wait_for_drq(hostdata) == 0) { ++ int bytes, chunk_bytes; + + if (macintosh_config->ident == MAC_MODEL_IIFX) + write_ctrl_reg(hostdata, CTRL_HANDSHAKE_MODE | + CTRL_INTERRUPTS_ENABLE); + +- bytes = mac_pdma_send(s, d, min(hostdata->pdma_residual, 512)); ++ chunk_bytes = min(hostdata->pdma_residual, 512); ++ bytes = mac_pdma_send(s, d, chunk_bytes); ++ ++ if (macintosh_config->ident == MAC_MODEL_IIFX) ++ write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE); + + if (bytes > 0) { + s += bytes; + hostdata->pdma_residual -= bytes; + } + +- if (hostdata->pdma_residual == 0) { +- if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG, +- TCR_LAST_BYTE_SENT, +- TCR_LAST_BYTE_SENT, +- 0) < 0) { +- scmd_printk(KERN_ERR, hostdata->connected, +- "%s: Last Byte Sent timeout\n", __func__); +- result = -1; +- } +- goto out; +- } ++ if (hostdata->pdma_residual == 0) ++ break; + +- if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ, +- BUS_AND_STATUS_REG, BASR_ACK, +- BASR_ACK, 0) < 0) +- scmd_printk(KERN_DEBUG, hostdata->connected, +- "%s: !REQ and !ACK\n", __func__); +- if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) +- goto out; ++ if (bytes > 0) ++ continue; + +- if (bytes == 0) +- udelay(MAC_PDMA_DELAY); ++ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); ++ dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, ++ "%s: bus error [%d/%d] (%d/%d)\n", ++ __func__, s - src, len, bytes, chunk_bytes); + +- if (bytes >= 0) ++ if (bytes == 0) + continue; + +- dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, +- "%s: bus error (%d/%d)\n", __func__, s - src, len); +- NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); +- result = -1; +- goto out; ++ if (macscsi_wait_for_drq(hostdata) <= 0) ++ set_host_byte(hostdata->connected, DID_ERROR); ++ break; + } + +- scmd_printk(KERN_ERR, hostdata->connected, +- "%s: phase mismatch or !DRQ\n", __func__); +- NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); +- result = -1; +-out: +- if (macintosh_config->ident == MAC_MODEL_IIFX) +- write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE); +- return result; ++ return 0; + } + + static int macscsi_dma_xfer_len(struct NCR5380_hostdata *hostdata, +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c +index 4fce7cc32cb2c0..b0a574c534c4c0 100644 +--- a/drivers/scsi/sd.c ++++ b/drivers/scsi/sd.c +@@ -3118,7 +3118,7 @@ static void 
sd_read_block_characteristics(struct scsi_disk *sdkp) + rcu_read_lock(); + vpd = rcu_dereference(sdkp->device->vpd_pgb1); + +- if (!vpd || vpd->len < 8) { ++ if (!vpd || vpd->len <= 8) { + rcu_read_unlock(); + return; + } +diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c +index 868453b18c9aea..2ae64cda8bc9ea 100644 +--- a/drivers/scsi/smartpqi/smartpqi_init.c ++++ b/drivers/scsi/smartpqi/smartpqi_init.c +@@ -2355,14 +2355,6 @@ static inline void pqi_mask_device(u8 *scsi3addr) + scsi3addr[3] |= 0xc0; + } + +-static inline bool pqi_is_multipath_device(struct pqi_scsi_dev *device) +-{ +- if (pqi_is_logical_device(device)) +- return false; +- +- return (device->path_map & (device->path_map - 1)) != 0; +-} +- + static inline bool pqi_expose_device(struct pqi_scsi_dev *device) + { + return !device->is_physical_device || !pqi_skip_device(device->scsi3addr); +@@ -3259,14 +3251,12 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request) + int residual_count; + int xfer_count; + bool device_offline; +- struct pqi_scsi_dev *device; + + scmd = io_request->scmd; + error_info = io_request->error_info; + host_byte = DID_OK; + sense_data_length = 0; + device_offline = false; +- device = scmd->device->hostdata; + + switch (error_info->service_response) { + case PQI_AIO_SERV_RESPONSE_COMPLETE: +@@ -3291,14 +3281,8 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request) + break; + case PQI_AIO_STATUS_AIO_PATH_DISABLED: + pqi_aio_path_disabled(io_request); +- if (pqi_is_multipath_device(device)) { +- pqi_device_remove_start(device); +- host_byte = DID_NO_CONNECT; +- scsi_status = SAM_STAT_CHECK_CONDITION; +- } else { +- scsi_status = SAM_STAT_GOOD; +- io_request->status = -EAGAIN; +- } ++ scsi_status = SAM_STAT_GOOD; ++ io_request->status = -EAGAIN; + break; + case PQI_AIO_STATUS_NO_PATH_TO_DEVICE: + case PQI_AIO_STATUS_INVALID_DEVICE: +diff --git a/drivers/soc/fsl/qe/tsa.c b/drivers/soc/fsl/qe/tsa.c +index e0527b9efd0508..1bbc9af1e50b86 100644 +--- a/drivers/soc/fsl/qe/tsa.c ++++ b/drivers/soc/fsl/qe/tsa.c +@@ -140,7 +140,7 @@ static inline void tsa_write32(void __iomem *addr, u32 val) + iowrite32be(val, addr); + } + +-static inline void tsa_write8(void __iomem *addr, u32 val) ++static inline void tsa_write8(void __iomem *addr, u8 val) + { + iowrite8(val, addr); + } +diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c +index f9fd6177118cac..577f1f25ab103c 100644 +--- a/drivers/soc/qcom/smd-rpm.c ++++ b/drivers/soc/qcom/smd-rpm.c +@@ -196,9 +196,6 @@ static int qcom_smd_rpm_probe(struct rpmsg_device *rpdev) + { + struct qcom_smd_rpm *rpm; + +- if (!rpdev->dev.of_node) +- return -EINVAL; +- + rpm = devm_kzalloc(&rpdev->dev, sizeof(*rpm), GFP_KERNEL); + if (!rpm) + return -ENOMEM; +@@ -218,18 +215,38 @@ static void qcom_smd_rpm_remove(struct rpmsg_device *rpdev) + of_platform_depopulate(&rpdev->dev); + } + +-static const struct rpmsg_device_id qcom_smd_rpm_id_table[] = { +- { .name = "rpm_requests", }, +- { /* sentinel */ } ++static const struct of_device_id qcom_smd_rpm_of_match[] = { ++ { .compatible = "qcom,rpm-apq8084" }, ++ { .compatible = "qcom,rpm-ipq6018" }, ++ { .compatible = "qcom,rpm-ipq9574" }, ++ { .compatible = "qcom,rpm-msm8226" }, ++ { .compatible = "qcom,rpm-msm8909" }, ++ { .compatible = "qcom,rpm-msm8916" }, ++ { .compatible = "qcom,rpm-msm8936" }, ++ { .compatible = "qcom,rpm-msm8953" }, ++ { .compatible = "qcom,rpm-msm8974" }, ++ { .compatible = "qcom,rpm-msm8976" }, ++ { .compatible = 
"qcom,rpm-msm8994" }, ++ { .compatible = "qcom,rpm-msm8996" }, ++ { .compatible = "qcom,rpm-msm8998" }, ++ { .compatible = "qcom,rpm-sdm660" }, ++ { .compatible = "qcom,rpm-sm6115" }, ++ { .compatible = "qcom,rpm-sm6125" }, ++ { .compatible = "qcom,rpm-sm6375" }, ++ { .compatible = "qcom,rpm-qcm2290" }, ++ { .compatible = "qcom,rpm-qcs404" }, ++ {} + }; +-MODULE_DEVICE_TABLE(rpmsg, qcom_smd_rpm_id_table); ++MODULE_DEVICE_TABLE(of, qcom_smd_rpm_of_match); + + static struct rpmsg_driver qcom_smd_rpm_driver = { + .probe = qcom_smd_rpm_probe, + .remove = qcom_smd_rpm_remove, + .callback = qcom_smd_rpm_callback, +- .id_table = qcom_smd_rpm_id_table, +- .drv.name = "qcom_smd_rpm", ++ .drv = { ++ .name = "qcom_smd_rpm", ++ .of_match_table = qcom_smd_rpm_of_match, ++ }, + }; + + static int __init qcom_smd_rpm_init(void) +diff --git a/drivers/soc/versatile/soc-integrator.c b/drivers/soc/versatile/soc-integrator.c +index bab4ad87aa7500..d5099a3386b4fc 100644 +--- a/drivers/soc/versatile/soc-integrator.c ++++ b/drivers/soc/versatile/soc-integrator.c +@@ -113,6 +113,7 @@ static int __init integrator_soc_init(void) + return -ENODEV; + + syscon_regmap = syscon_node_to_regmap(np); ++ of_node_put(np); + if (IS_ERR(syscon_regmap)) + return PTR_ERR(syscon_regmap); + +diff --git a/drivers/soc/versatile/soc-realview.c b/drivers/soc/versatile/soc-realview.c +index c6876d232d8fd6..cf91abe07d38d0 100644 +--- a/drivers/soc/versatile/soc-realview.c ++++ b/drivers/soc/versatile/soc-realview.c +@@ -4,6 +4,7 @@ + * + * Author: Linus Walleij + */ ++#include + #include + #include + #include +@@ -81,6 +82,13 @@ static struct attribute *realview_attrs[] = { + + ATTRIBUTE_GROUPS(realview); + ++static void realview_soc_socdev_release(void *data) ++{ ++ struct soc_device *soc_dev = data; ++ ++ soc_device_unregister(soc_dev); ++} ++ + static int realview_soc_probe(struct platform_device *pdev) + { + struct regmap *syscon_regmap; +@@ -93,7 +101,7 @@ static int realview_soc_probe(struct platform_device *pdev) + if (IS_ERR(syscon_regmap)) + return PTR_ERR(syscon_regmap); + +- soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); ++ soc_dev_attr = devm_kzalloc(&pdev->dev, sizeof(*soc_dev_attr), GFP_KERNEL); + if (!soc_dev_attr) + return -ENOMEM; + +@@ -106,10 +114,14 @@ static int realview_soc_probe(struct platform_device *pdev) + soc_dev_attr->family = "Versatile"; + soc_dev_attr->custom_attr_group = realview_groups[0]; + soc_dev = soc_device_register(soc_dev_attr); +- if (IS_ERR(soc_dev)) { +- kfree(soc_dev_attr); ++ if (IS_ERR(soc_dev)) + return -ENODEV; +- } ++ ++ ret = devm_add_action_or_reset(&pdev->dev, realview_soc_socdev_release, ++ soc_dev); ++ if (ret) ++ return ret; ++ + ret = regmap_read(syscon_regmap, REALVIEW_SYS_ID_OFFSET, + &realview_coreid); + if (ret) +diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c +index 4cc4f32ca44904..6f9e9d87167758 100644 +--- a/drivers/spi/atmel-quadspi.c ++++ b/drivers/spi/atmel-quadspi.c +@@ -375,9 +375,9 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq, + * If the QSPI controller is set in regular SPI mode, set it in + * Serial Memory Mode (SMM). 
+ */ +- if (aq->mr != QSPI_MR_SMM) { +- atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR); +- aq->mr = QSPI_MR_SMM; ++ if (!(aq->mr & QSPI_MR_SMM)) { ++ aq->mr |= QSPI_MR_SMM; ++ atmel_qspi_write(aq->mr, aq, QSPI_MR); + } + + /* Clear pending interrupts */ +@@ -501,7 +501,8 @@ static int atmel_qspi_setup(struct spi_device *spi) + if (ret < 0) + return ret; + +- aq->scr = QSPI_SCR_SCBR(scbr); ++ aq->scr &= ~QSPI_SCR_SCBR_MASK; ++ aq->scr |= QSPI_SCR_SCBR(scbr); + atmel_qspi_write(aq->scr, aq, QSPI_SCR); + + pm_runtime_mark_last_busy(ctrl->dev.parent); +@@ -534,6 +535,7 @@ static int atmel_qspi_set_cs_timing(struct spi_device *spi) + if (ret < 0) + return ret; + ++ aq->scr &= ~QSPI_SCR_DLYBS_MASK; + aq->scr |= QSPI_SCR_DLYBS(cs_setup); + atmel_qspi_write(aq->scr, aq, QSPI_SCR); + +@@ -549,8 +551,8 @@ static void atmel_qspi_init(struct atmel_qspi *aq) + atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR); + + /* Set the QSPI controller by default in Serial Memory Mode */ +- atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR); +- aq->mr = QSPI_MR_SMM; ++ aq->mr |= QSPI_MR_SMM; ++ atmel_qspi_write(aq->mr, aq, QSPI_MR); + + /* Enable the QSPI controller */ + atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR); +@@ -726,6 +728,7 @@ static void atmel_qspi_remove(struct platform_device *pdev) + clk_unprepare(aq->pclk); + + pm_runtime_disable(&pdev->dev); ++ pm_runtime_dont_use_autosuspend(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + } + +diff --git a/drivers/spi/spi-bcmbca-hsspi.c b/drivers/spi/spi-bcmbca-hsspi.c +index 9f64afd8164ea9..4965bc86d7f52a 100644 +--- a/drivers/spi/spi-bcmbca-hsspi.c ++++ b/drivers/spi/spi-bcmbca-hsspi.c +@@ -546,12 +546,14 @@ static int bcmbca_hsspi_probe(struct platform_device *pdev) + goto out_put_host; + } + +- pm_runtime_enable(&pdev->dev); ++ ret = devm_pm_runtime_enable(&pdev->dev); ++ if (ret) ++ goto out_put_host; + + ret = sysfs_create_group(&pdev->dev.kobj, &bcmbca_hsspi_group); + if (ret) { + dev_err(&pdev->dev, "couldn't register sysfs group\n"); +- goto out_pm_disable; ++ goto out_put_host; + } + + /* register and we are done */ +@@ -565,8 +567,6 @@ static int bcmbca_hsspi_probe(struct platform_device *pdev) + + out_sysgroup_disable: + sysfs_remove_group(&pdev->dev.kobj, &bcmbca_hsspi_group); +-out_pm_disable: +- pm_runtime_disable(&pdev->dev); + out_put_host: + spi_controller_put(host); + out_disable_pll_clk: +diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c +index f7e268cf908507..180cea7d381729 100644 +--- a/drivers/spi/spi-fsl-lpspi.c ++++ b/drivers/spi/spi-fsl-lpspi.c +@@ -988,6 +988,7 @@ static void fsl_lpspi_remove(struct platform_device *pdev) + + fsl_lpspi_dma_exit(controller); + ++ pm_runtime_dont_use_autosuspend(fsl_lpspi->dev); + pm_runtime_disable(fsl_lpspi->dev); + } + +diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c +index 93a9667f6bdcf1..bc6c086ddd43f4 100644 +--- a/drivers/spi/spi-nxp-fspi.c ++++ b/drivers/spi/spi-nxp-fspi.c +@@ -57,13 +57,6 @@ + #include + #include + +-/* +- * The driver only uses one single LUT entry, that is updated on +- * each call of exec_op(). Index 0 is preset at boot with a basic +- * read operation, so let's use the last entry (31). 
+- */ +-#define SEQID_LUT 31 +- + /* Registers used by the driver */ + #define FSPI_MCR0 0x00 + #define FSPI_MCR0_AHB_TIMEOUT(x) ((x) << 24) +@@ -263,9 +256,6 @@ + #define FSPI_TFDR 0x180 + + #define FSPI_LUT_BASE 0x200 +-#define FSPI_LUT_OFFSET (SEQID_LUT * 4 * 4) +-#define FSPI_LUT_REG(idx) \ +- (FSPI_LUT_BASE + FSPI_LUT_OFFSET + (idx) * 4) + + /* register map end */ + +@@ -341,6 +331,7 @@ struct nxp_fspi_devtype_data { + unsigned int txfifo; + unsigned int ahb_buf_size; + unsigned int quirks; ++ unsigned int lut_num; + bool little_endian; + }; + +@@ -349,6 +340,7 @@ static struct nxp_fspi_devtype_data lx2160a_data = { + .txfifo = SZ_1K, /* (128 * 64 bits) */ + .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */ + .quirks = 0, ++ .lut_num = 32, + .little_endian = true, /* little-endian */ + }; + +@@ -357,6 +349,7 @@ static struct nxp_fspi_devtype_data imx8mm_data = { + .txfifo = SZ_1K, /* (128 * 64 bits) */ + .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */ + .quirks = 0, ++ .lut_num = 32, + .little_endian = true, /* little-endian */ + }; + +@@ -365,6 +358,7 @@ static struct nxp_fspi_devtype_data imx8qxp_data = { + .txfifo = SZ_1K, /* (128 * 64 bits) */ + .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */ + .quirks = 0, ++ .lut_num = 32, + .little_endian = true, /* little-endian */ + }; + +@@ -373,6 +367,16 @@ static struct nxp_fspi_devtype_data imx8dxl_data = { + .txfifo = SZ_1K, /* (128 * 64 bits) */ + .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */ + .quirks = FSPI_QUIRK_USE_IP_ONLY, ++ .lut_num = 32, ++ .little_endian = true, /* little-endian */ ++}; ++ ++static struct nxp_fspi_devtype_data imx8ulp_data = { ++ .rxfifo = SZ_512, /* (64 * 64 bits) */ ++ .txfifo = SZ_1K, /* (128 * 64 bits) */ ++ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */ ++ .quirks = 0, ++ .lut_num = 16, + .little_endian = true, /* little-endian */ + }; + +@@ -544,6 +548,8 @@ static void nxp_fspi_prepare_lut(struct nxp_fspi *f, + void __iomem *base = f->iobase; + u32 lutval[4] = {}; + int lutidx = 1, i; ++ u32 lut_offset = (f->devtype_data->lut_num - 1) * 4 * 4; ++ u32 target_lut_reg; + + /* cmd */ + lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth), +@@ -588,8 +594,10 @@ static void nxp_fspi_prepare_lut(struct nxp_fspi *f, + fspi_writel(f, FSPI_LCKER_UNLOCK, f->iobase + FSPI_LCKCR); + + /* fill LUT */ +- for (i = 0; i < ARRAY_SIZE(lutval); i++) +- fspi_writel(f, lutval[i], base + FSPI_LUT_REG(i)); ++ for (i = 0; i < ARRAY_SIZE(lutval); i++) { ++ target_lut_reg = FSPI_LUT_BASE + lut_offset + i * 4; ++ fspi_writel(f, lutval[i], base + target_lut_reg); ++ } + + dev_dbg(f->dev, "CMD[%x] lutval[0:%x \t 1:%x \t 2:%x \t 3:%x], size: 0x%08x\n", + op->cmd.opcode, lutval[0], lutval[1], lutval[2], lutval[3], op->data.nbytes); +@@ -876,7 +884,7 @@ static int nxp_fspi_do_op(struct nxp_fspi *f, const struct spi_mem_op *op) + void __iomem *base = f->iobase; + int seqnum = 0; + int err = 0; +- u32 reg; ++ u32 reg, seqid_lut; + + reg = fspi_readl(f, base + FSPI_IPRXFCR); + /* invalid RXFIFO first */ +@@ -892,8 +900,9 @@ static int nxp_fspi_do_op(struct nxp_fspi *f, const struct spi_mem_op *op) + * the LUT at each exec_op() call. And also specify the DATA + * length, since it's has not been specified in the LUT. 
+ */ ++ seqid_lut = f->devtype_data->lut_num - 1; + fspi_writel(f, op->data.nbytes | +- (SEQID_LUT << FSPI_IPCR1_SEQID_SHIFT) | ++ (seqid_lut << FSPI_IPCR1_SEQID_SHIFT) | + (seqnum << FSPI_IPCR1_SEQNUM_SHIFT), + base + FSPI_IPCR1); + +@@ -1017,7 +1026,7 @@ static int nxp_fspi_default_setup(struct nxp_fspi *f) + { + void __iomem *base = f->iobase; + int ret, i; +- u32 reg; ++ u32 reg, seqid_lut; + + /* disable and unprepare clock to avoid glitch pass to controller */ + nxp_fspi_clk_disable_unprep(f); +@@ -1092,11 +1101,17 @@ static int nxp_fspi_default_setup(struct nxp_fspi *f) + fspi_writel(f, reg, base + FSPI_FLSHB1CR1); + fspi_writel(f, reg, base + FSPI_FLSHB2CR1); + ++ /* ++ * The driver only uses one single LUT entry, that is updated on ++ * each call of exec_op(). Index 0 is preset at boot with a basic ++ * read operation, so let's use the last entry. ++ */ ++ seqid_lut = f->devtype_data->lut_num - 1; + /* AHB Read - Set lut sequence ID for all CS. */ +- fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA1CR2); +- fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA2CR2); +- fspi_writel(f, SEQID_LUT, base + FSPI_FLSHB1CR2); +- fspi_writel(f, SEQID_LUT, base + FSPI_FLSHB2CR2); ++ fspi_writel(f, seqid_lut, base + FSPI_FLSHA1CR2); ++ fspi_writel(f, seqid_lut, base + FSPI_FLSHA2CR2); ++ fspi_writel(f, seqid_lut, base + FSPI_FLSHB1CR2); ++ fspi_writel(f, seqid_lut, base + FSPI_FLSHB2CR2); + + f->selected = -1; + +@@ -1291,6 +1306,7 @@ static const struct of_device_id nxp_fspi_dt_ids[] = { + { .compatible = "nxp,imx8mp-fspi", .data = (void *)&imx8mm_data, }, + { .compatible = "nxp,imx8qxp-fspi", .data = (void *)&imx8qxp_data, }, + { .compatible = "nxp,imx8dxl-fspi", .data = (void *)&imx8dxl_data, }, ++ { .compatible = "nxp,imx8ulp-fspi", .data = (void *)&imx8ulp_data, }, + { /* sentinel */ } + }; + MODULE_DEVICE_TABLE(of, nxp_fspi_dt_ids); +diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c +index e982d3189fdce8..1381563941fe3e 100644 +--- a/drivers/spi/spi-ppc4xx.c ++++ b/drivers/spi/spi-ppc4xx.c +@@ -26,7 +26,6 @@ + #include + #include + #include +-#include + #include + #include + #include +@@ -410,7 +409,11 @@ static int spi_ppc4xx_of_probe(struct platform_device *op) + } + + /* Request IRQ */ +- hw->irqnum = irq_of_parse_and_map(np, 0); ++ ret = platform_get_irq(op, 0); ++ if (ret < 0) ++ goto free_host; ++ hw->irqnum = ret; ++ + ret = request_irq(hw->irqnum, spi_ppc4xx_int, + 0, "spi_ppc4xx_of", (void *)hw); + if (ret) { +diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c +index f9f40c0e9addc4..52cb1a3bb8c786 100644 +--- a/drivers/thunderbolt/switch.c ++++ b/drivers/thunderbolt/switch.c +@@ -921,6 +921,48 @@ int tb_port_get_link_speed(struct tb_port *port) + } + } + ++/** ++ * tb_port_get_link_generation() - Returns link generation ++ * @port: Lane adapter ++ * ++ * Returns link generation as number or negative errno in case of ++ * failure. Does not distinguish between Thunderbolt 1 and Thunderbolt 2 ++ * links so for those always returns 2. 
++ */ ++int tb_port_get_link_generation(struct tb_port *port) ++{ ++ int ret; ++ ++ ret = tb_port_get_link_speed(port); ++ if (ret < 0) ++ return ret; ++ ++ switch (ret) { ++ case 40: ++ return 4; ++ case 20: ++ return 3; ++ default: ++ return 2; ++ } ++} ++ ++static const char *width_name(enum tb_link_width width) ++{ ++ switch (width) { ++ case TB_LINK_WIDTH_SINGLE: ++ return "symmetric, single lane"; ++ case TB_LINK_WIDTH_DUAL: ++ return "symmetric, dual lanes"; ++ case TB_LINK_WIDTH_ASYM_TX: ++ return "asymmetric, 3 transmitters, 1 receiver"; ++ case TB_LINK_WIDTH_ASYM_RX: ++ return "asymmetric, 3 receivers, 1 transmitter"; ++ default: ++ return "unknown"; ++ } ++} ++ + /** + * tb_port_get_link_width() - Get current link width + * @port: Port to check (USB4 or CIO) +@@ -946,8 +988,15 @@ int tb_port_get_link_width(struct tb_port *port) + LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT; + } + +-static bool tb_port_is_width_supported(struct tb_port *port, +- unsigned int width_mask) ++/** ++ * tb_port_width_supported() - Is the given link width supported ++ * @port: Port to check ++ * @width: Widths to check (bitmask) ++ * ++ * Can be called to any lane adapter. Checks if given @width is ++ * supported by the hardware and returns %true if it is. ++ */ ++bool tb_port_width_supported(struct tb_port *port, unsigned int width) + { + u32 phy, widths; + int ret; +@@ -955,20 +1004,23 @@ static bool tb_port_is_width_supported(struct tb_port *port, + if (!port->cap_phy) + return false; + ++ if (width & (TB_LINK_WIDTH_ASYM_TX | TB_LINK_WIDTH_ASYM_RX)) { ++ if (tb_port_get_link_generation(port) < 4 || ++ !usb4_port_asym_supported(port)) ++ return false; ++ } ++ + ret = tb_port_read(port, &phy, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_0, 1); + if (ret) + return false; + +- widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >> +- LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT; +- +- return widths & width_mask; +-} +- +-static bool is_gen4_link(struct tb_port *port) +-{ +- return tb_port_get_link_speed(port) > 20; ++ /* ++ * The field encoding is the same as &enum tb_link_width (which is ++ * passed to @width). ++ */ ++ widths = FIELD_GET(LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK, phy); ++ return widths & width; + } + + /** +@@ -998,15 +1050,23 @@ int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width) + switch (width) { + case TB_LINK_WIDTH_SINGLE: + /* Gen 4 link cannot be single */ +- if (is_gen4_link(port)) ++ if (tb_port_get_link_generation(port) >= 4) + return -EOPNOTSUPP; + val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE << + LANE_ADP_CS_1_TARGET_WIDTH_SHIFT; + break; ++ + case TB_LINK_WIDTH_DUAL: ++ if (tb_port_get_link_generation(port) >= 4) ++ return usb4_port_asym_set_link_width(port, width); + val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL << + LANE_ADP_CS_1_TARGET_WIDTH_SHIFT; + break; ++ ++ case TB_LINK_WIDTH_ASYM_TX: ++ case TB_LINK_WIDTH_ASYM_RX: ++ return usb4_port_asym_set_link_width(port, width); ++ + default: + return -EINVAL; + } +@@ -1131,7 +1191,7 @@ void tb_port_lane_bonding_disable(struct tb_port *port) + /** + * tb_port_wait_for_link_width() - Wait until link reaches specific width + * @port: Port to wait for +- * @width_mask: Expected link width mask ++ * @width: Expected link width (bitmask) + * @timeout_msec: Timeout in ms how long to wait + * + * Should be used after both ends of the link have been bonded (or +@@ -1140,14 +1200,15 @@ void tb_port_lane_bonding_disable(struct tb_port *port) + * within the given timeout, %0 if it did. 
Can be passed a mask of + * expected widths and succeeds if any of the widths is reached. + */ +-int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width_mask, ++int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width, + int timeout_msec) + { + ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec); + int ret; + + /* Gen 4 link does not support single lane */ +- if ((width_mask & TB_LINK_WIDTH_SINGLE) && is_gen4_link(port)) ++ if ((width & TB_LINK_WIDTH_SINGLE) && ++ tb_port_get_link_generation(port) >= 4) + return -EOPNOTSUPP; + + do { +@@ -1160,7 +1221,7 @@ int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width_mask, + */ + if (ret != -EACCES) + return ret; +- } else if (ret & width_mask) { ++ } else if (ret & width) { + return 0; + } + +@@ -1210,6 +1271,9 @@ int tb_port_update_credits(struct tb_port *port) + ret = tb_port_do_update_credits(port); + if (ret) + return ret; ++ ++ if (!port->dual_link_port) ++ return 0; + return tb_port_do_update_credits(port->dual_link_port); + } + +@@ -2799,6 +2863,38 @@ static int tb_switch_update_link_attributes(struct tb_switch *sw) + return 0; + } + ++/* Must be called after tb_switch_update_link_attributes() */ ++static void tb_switch_link_init(struct tb_switch *sw) ++{ ++ struct tb_port *up, *down; ++ bool bonded; ++ ++ if (!tb_route(sw) || tb_switch_is_icm(sw)) ++ return; ++ ++ tb_sw_dbg(sw, "current link speed %u.0 Gb/s\n", sw->link_speed); ++ tb_sw_dbg(sw, "current link width %s\n", width_name(sw->link_width)); ++ ++ bonded = sw->link_width >= TB_LINK_WIDTH_DUAL; ++ ++ /* ++ * Gen 4 links come up as bonded so update the port structures ++ * accordingly. ++ */ ++ up = tb_upstream_port(sw); ++ down = tb_switch_downstream_port(sw); ++ ++ up->bonded = bonded; ++ if (up->dual_link_port) ++ up->dual_link_port->bonded = bonded; ++ tb_port_update_credits(up); ++ ++ down->bonded = bonded; ++ if (down->dual_link_port) ++ down->dual_link_port->bonded = bonded; ++ tb_port_update_credits(down); ++} ++ + /** + * tb_switch_lane_bonding_enable() - Enable lane bonding + * @sw: Switch to enable lane bonding +@@ -2807,24 +2903,20 @@ static int tb_switch_update_link_attributes(struct tb_switch *sw) + * switch. If conditions are correct and both switches support the feature, + * lanes are bonded. It is safe to call this to any switch. 
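/*
 * Aside (illustrative, not from the upstream patch): the wait helper above
 * follows the common ktime deadline idiom; a stripped-down sketch with
 * hypothetical names:
 */
static int example_poll_until(bool (*done)(void *data), void *data,
			      int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		if (done(data))
			return 0;
		usleep_range(1000, 2000);	/* back off between polls */
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}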
+ */ +-int tb_switch_lane_bonding_enable(struct tb_switch *sw) ++static int tb_switch_lane_bonding_enable(struct tb_switch *sw) + { + struct tb_port *up, *down; +- u64 route = tb_route(sw); +- unsigned int width_mask; ++ unsigned int width; + int ret; + +- if (!route) +- return 0; +- + if (!tb_switch_lane_bonding_possible(sw)) + return 0; + + up = tb_upstream_port(sw); + down = tb_switch_downstream_port(sw); + +- if (!tb_port_is_width_supported(up, TB_LINK_WIDTH_DUAL) || +- !tb_port_is_width_supported(down, TB_LINK_WIDTH_DUAL)) ++ if (!tb_port_width_supported(up, TB_LINK_WIDTH_DUAL) || ++ !tb_port_width_supported(down, TB_LINK_WIDTH_DUAL)) + return 0; + + /* +@@ -2848,21 +2940,10 @@ int tb_switch_lane_bonding_enable(struct tb_switch *sw) + } + + /* Any of the widths are all bonded */ +- width_mask = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX | +- TB_LINK_WIDTH_ASYM_RX; +- +- ret = tb_port_wait_for_link_width(down, width_mask, 100); +- if (ret) { +- tb_port_warn(down, "timeout enabling lane bonding\n"); +- return ret; +- } ++ width = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX | ++ TB_LINK_WIDTH_ASYM_RX; + +- tb_port_update_credits(down); +- tb_port_update_credits(up); +- tb_switch_update_link_attributes(sw); +- +- tb_sw_dbg(sw, "lane bonding enabled\n"); +- return ret; ++ return tb_port_wait_for_link_width(down, width, 100); + } + + /** +@@ -2872,20 +2953,27 @@ int tb_switch_lane_bonding_enable(struct tb_switch *sw) + * Disables lane bonding between @sw and parent. This can be called even + * if lanes were not bonded originally. + */ +-void tb_switch_lane_bonding_disable(struct tb_switch *sw) ++static int tb_switch_lane_bonding_disable(struct tb_switch *sw) + { + struct tb_port *up, *down; + int ret; + +- if (!tb_route(sw)) +- return; +- + up = tb_upstream_port(sw); + if (!up->bonded) +- return; ++ return 0; + +- down = tb_switch_downstream_port(sw); ++ /* ++ * If the link is Gen 4 there is no way to switch the link to ++ * two single lane links so avoid that here. Also don't bother ++ * if the link is not up anymore (sw is unplugged). ++ */ ++ ret = tb_port_get_link_generation(up); ++ if (ret < 0) ++ return ret; ++ if (ret >= 4) ++ return -EOPNOTSUPP; + ++ down = tb_switch_downstream_port(sw); + tb_port_lane_bonding_disable(up); + tb_port_lane_bonding_disable(down); + +@@ -2893,15 +2981,160 @@ void tb_switch_lane_bonding_disable(struct tb_switch *sw) + * It is fine if we get other errors as the router might have + * been unplugged. + */ +- ret = tb_port_wait_for_link_width(down, TB_LINK_WIDTH_SINGLE, 100); +- if (ret == -ETIMEDOUT) +- tb_sw_warn(sw, "timeout disabling lane bonding\n"); ++ return tb_port_wait_for_link_width(down, TB_LINK_WIDTH_SINGLE, 100); ++} ++ ++/* Note updating sw->link_width done in tb_switch_update_link_attributes() */ ++static int tb_switch_asym_enable(struct tb_switch *sw, enum tb_link_width width) ++{ ++ struct tb_port *up, *down, *port; ++ enum tb_link_width down_width; ++ int ret; ++ ++ up = tb_upstream_port(sw); ++ down = tb_switch_downstream_port(sw); ++ ++ if (width == TB_LINK_WIDTH_ASYM_TX) { ++ down_width = TB_LINK_WIDTH_ASYM_RX; ++ port = down; ++ } else { ++ down_width = TB_LINK_WIDTH_ASYM_TX; ++ port = up; ++ } ++ ++ ret = tb_port_set_link_width(up, width); ++ if (ret) ++ return ret; ++ ++ ret = tb_port_set_link_width(down, down_width); ++ if (ret) ++ return ret; ++ ++ /* ++ * Initiate the change in the router that one of its TX lanes is ++ * changing to RX but do so only if there is an actual change. 
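/*
 * Aside (illustrative, not from the upstream patch): the pairing above,
 * seen from the device router's upstream port:
 *
 *   requested width  up adapter   down adapter  usb4_port_asym_start() on
 *   ASYM_TX          3 TX / 1 RX  ASYM_RX       @down (its TX becomes RX)
 *   ASYM_RX          1 TX / 3 RX  ASYM_TX       @up   (its TX becomes RX)
 *
 * so the start request always goes to the router that converts one of its
 * transmitters into a receiver.
 */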
++ */ ++ if (sw->link_width != width) { ++ ret = usb4_port_asym_start(port); ++ if (ret) ++ return ret; ++ ++ ret = tb_port_wait_for_link_width(up, width, 100); ++ if (ret) ++ return ret; ++ } ++ ++ return 0; ++} ++ ++/* Note updating sw->link_width done in tb_switch_update_link_attributes() */ ++static int tb_switch_asym_disable(struct tb_switch *sw) ++{ ++ struct tb_port *up, *down; ++ int ret; ++ ++ up = tb_upstream_port(sw); ++ down = tb_switch_downstream_port(sw); ++ ++ ret = tb_port_set_link_width(up, TB_LINK_WIDTH_DUAL); ++ if (ret) ++ return ret; ++ ++ ret = tb_port_set_link_width(down, TB_LINK_WIDTH_DUAL); ++ if (ret) ++ return ret; ++ ++ /* ++ * Initiate the change in the router that has three TX lanes and ++ * is changing one of its TX lanes to RX but only if there is a ++ * change in the link width. ++ */ ++ if (sw->link_width > TB_LINK_WIDTH_DUAL) { ++ if (sw->link_width == TB_LINK_WIDTH_ASYM_TX) ++ ret = usb4_port_asym_start(up); ++ else ++ ret = usb4_port_asym_start(down); ++ if (ret) ++ return ret; ++ ++ ret = tb_port_wait_for_link_width(up, TB_LINK_WIDTH_DUAL, 100); ++ if (ret) ++ return ret; ++ } ++ ++ return 0; ++} ++ ++/** ++ * tb_switch_set_link_width() - Configure router link width ++ * @sw: Router to configure ++ * @width: The new link width ++ * ++ * Set device router link width to @width from router upstream port ++ * perspective. Supports also asymmetric links if the routers boths side ++ * of the link supports it. ++ * ++ * Does nothing for host router. ++ * ++ * Returns %0 in case of success, negative errno otherwise. ++ */ ++int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width) ++{ ++ struct tb_port *up, *down; ++ int ret = 0; ++ ++ if (!tb_route(sw)) ++ return 0; ++ ++ up = tb_upstream_port(sw); ++ down = tb_switch_downstream_port(sw); ++ ++ switch (width) { ++ case TB_LINK_WIDTH_SINGLE: ++ ret = tb_switch_lane_bonding_disable(sw); ++ break; ++ ++ case TB_LINK_WIDTH_DUAL: ++ if (sw->link_width == TB_LINK_WIDTH_ASYM_TX || ++ sw->link_width == TB_LINK_WIDTH_ASYM_RX) { ++ ret = tb_switch_asym_disable(sw); ++ if (ret) ++ break; ++ } ++ ret = tb_switch_lane_bonding_enable(sw); ++ break; ++ ++ case TB_LINK_WIDTH_ASYM_TX: ++ case TB_LINK_WIDTH_ASYM_RX: ++ ret = tb_switch_asym_enable(sw, width); ++ break; ++ } ++ ++ switch (ret) { ++ case 0: ++ break; ++ ++ case -ETIMEDOUT: ++ tb_sw_warn(sw, "timeout changing link width\n"); ++ return ret; ++ ++ case -ENOTCONN: ++ case -EOPNOTSUPP: ++ case -ENODEV: ++ return ret; ++ ++ default: ++ tb_sw_dbg(sw, "failed to change link width: %d\n", ret); ++ return ret; ++ } + + tb_port_update_credits(down); + tb_port_update_credits(up); ++ + tb_switch_update_link_attributes(sw); + +- tb_sw_dbg(sw, "lane bonding disabled\n"); ++ tb_sw_dbg(sw, "link width set to %s\n", width_name(width)); ++ return ret; + } + + /** +@@ -3068,6 +3301,8 @@ int tb_switch_add(struct tb_switch *sw) + if (ret) + return ret; + ++ tb_switch_link_init(sw); ++ + ret = tb_switch_clx_init(sw); + if (ret) + return ret; +diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c +index c5e10c1d4c383c..ea155547e8719d 100644 +--- a/drivers/thunderbolt/tb.c ++++ b/drivers/thunderbolt/tb.c +@@ -16,8 +16,31 @@ + #include "tb_regs.h" + #include "tunnel.h" + +-#define TB_TIMEOUT 100 /* ms */ +-#define MAX_GROUPS 7 /* max Group_ID is 7 */ ++#define TB_TIMEOUT 100 /* ms */ ++ ++/* ++ * Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver ++ * direction. This is 40G - 10% guard band bandwidth. 
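/*
 * Aside (illustrative, not from the upstream patch): worked out,
 * TB_ASYM_MIN = 40000 * 90 / 100 = 36000 Mb/s, i.e. the 40 Gb/s
 * single-lane direction of an asymmetric Gen 4 link after the same 10%
 * guard band that tb_maximum_bandwidth() applies to every link.
 */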
++ */ ++#define TB_ASYM_MIN (40000 * 90 / 100) ++ ++/* ++ * Threshold bandwidth (in Mb/s) that is used to switch the links to ++ * asymmetric and back. This is selected as 45G which means when the ++ * request is higher than this, we switch the link to asymmetric, and ++ * when it is less than this we switch it back. The 45G is selected so ++ * that we still have 27G (of the total 72G) for bulk PCIe traffic when ++ * switching back to symmetric. ++ */ ++#define TB_ASYM_THRESHOLD 45000 ++ ++#define MAX_GROUPS 7 /* max Group_ID is 7 */ ++ ++static unsigned int asym_threshold = TB_ASYM_THRESHOLD; ++module_param_named(asym_threshold, asym_threshold, uint, 0444); ++MODULE_PARM_DESC(asym_threshold, ++ "threshold (Mb/s) when to Gen 4 switch link symmetry. 0 disables. (default: " ++ __MODULE_STRING(TB_ASYM_THRESHOLD) ")"); + + /** + * struct tb_cm - Simple Thunderbolt connection manager +@@ -255,13 +278,13 @@ static int tb_enable_clx(struct tb_switch *sw) + * this in the future to cover the whole topology if it turns + * out to be beneficial. + */ +- while (sw && sw->config.depth > 1) ++ while (sw && tb_switch_depth(sw) > 1) + sw = tb_switch_parent(sw); + + if (!sw) + return 0; + +- if (sw->config.depth != 1) ++ if (tb_switch_depth(sw) != 1) + return 0; + + /* +@@ -285,14 +308,32 @@ static int tb_enable_clx(struct tb_switch *sw) + return ret == -EOPNOTSUPP ? 0 : ret; + } + +-/* Disables CL states up to the host router */ +-static void tb_disable_clx(struct tb_switch *sw) ++/** ++ * tb_disable_clx() - Disable CL states up to host router ++ * @sw: Router to start ++ * ++ * Disables CL states from @sw up to the host router. Returns true if ++ * any CL state were disabled. This can be used to figure out whether ++ * the link was setup by us or the boot firmware so we don't ++ * accidentally enable them if they were not enabled during discovery. ++ */ ++static bool tb_disable_clx(struct tb_switch *sw) + { ++ bool disabled = false; ++ + do { +- if (tb_switch_clx_disable(sw) < 0) ++ int ret; ++ ++ ret = tb_switch_clx_disable(sw); ++ if (ret > 0) ++ disabled = true; ++ else if (ret < 0) + tb_sw_warn(sw, "failed to disable CL states\n"); ++ + sw = tb_switch_parent(sw); + } while (sw); ++ ++ return disabled; + } + + static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data) +@@ -553,7 +594,7 @@ static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb, + struct tb_switch *sw; + + /* Pick the router that is deepest in the topology */ +- if (dst_port->sw->config.depth > src_port->sw->config.depth) ++ if (tb_port_path_direction_downstream(src_port, dst_port)) + sw = dst_port->sw; + else + sw = src_port->sw; +@@ -572,133 +613,294 @@ static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb, + return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL); + } + +-static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port, +- struct tb_port *dst_port, int *available_up, int *available_down) +-{ +- int usb3_consumed_up, usb3_consumed_down, ret; +- struct tb_cm *tcm = tb_priv(tb); ++/** ++ * tb_consumed_usb3_pcie_bandwidth() - Consumed USB3/PCIe bandwidth over a single link ++ * @tb: Domain structure ++ * @src_port: Source protocol adapter ++ * @dst_port: Destination protocol adapter ++ * @port: USB4 port the consumed bandwidth is calculated ++ * @consumed_up: Consumed upsream bandwidth (Mb/s) ++ * @consumed_down: Consumed downstream bandwidth (Mb/s) ++ * ++ * Calculates consumed USB3 and PCIe bandwidth at @port between path ++ * from @src_port to @dst_port. 
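/*
 * Aside (illustrative, not from the upstream patch): the 45G/27G figures
 * above work out as follows. A symmetric Gen 4 link is 2 x 40 Gb/s =
 * 80 Gb/s; the 10% guard band leaves 72 Gb/s usable, and 72000 - 45000 =
 * 27000 Mb/s remains for PCIe bulk traffic at the threshold. Loading the
 * thunderbolt module with asym_threshold=0 disables the transitions:
 * both tb_configure_asym() and tb_configure_sym() return early when the
 * parameter is zero.
 */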
Does not take tunnel starting from ++ * @src_port and ending from @src_port into account. ++ */ ++static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb, ++ struct tb_port *src_port, ++ struct tb_port *dst_port, ++ struct tb_port *port, ++ int *consumed_up, ++ int *consumed_down) ++{ ++ int pci_consumed_up, pci_consumed_down; + struct tb_tunnel *tunnel; +- struct tb_port *port; + +- tb_dbg(tb, "calculating available bandwidth between %llx:%u <-> %llx:%u\n", +- tb_route(src_port->sw), src_port->port, tb_route(dst_port->sw), +- dst_port->port); ++ *consumed_up = *consumed_down = 0; + + tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); + if (tunnel && tunnel->src_port != src_port && + tunnel->dst_port != dst_port) { +- ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up, +- &usb3_consumed_down); ++ int ret; ++ ++ ret = tb_tunnel_consumed_bandwidth(tunnel, consumed_up, ++ consumed_down); + if (ret) + return ret; +- } else { +- usb3_consumed_up = 0; +- usb3_consumed_down = 0; + } + +- /* Maximum possible bandwidth asymmetric Gen 4 link is 120 Gb/s */ +- *available_up = *available_down = 120000; ++ /* ++ * If there is anything reserved for PCIe bulk traffic take it ++ * into account here too. ++ */ ++ if (tb_tunnel_reserved_pci(port, &pci_consumed_up, &pci_consumed_down)) { ++ *consumed_up += pci_consumed_up; ++ *consumed_down += pci_consumed_down; ++ } + +- /* Find the minimum available bandwidth over all links */ +- tb_for_each_port_on_path(src_port, dst_port, port) { +- int link_speed, link_width, up_bw, down_bw; ++ return 0; ++} + +- if (!tb_port_is_null(port)) ++/** ++ * tb_consumed_dp_bandwidth() - Consumed DP bandwidth over a single link ++ * @tb: Domain structure ++ * @src_port: Source protocol adapter ++ * @dst_port: Destination protocol adapter ++ * @port: USB4 port the consumed bandwidth is calculated ++ * @consumed_up: Consumed upsream bandwidth (Mb/s) ++ * @consumed_down: Consumed downstream bandwidth (Mb/s) ++ * ++ * Calculates consumed DP bandwidth at @port between path from @src_port ++ * to @dst_port. Does not take tunnel starting from @src_port and ending ++ * from @src_port into account. ++ */ ++static int tb_consumed_dp_bandwidth(struct tb *tb, ++ struct tb_port *src_port, ++ struct tb_port *dst_port, ++ struct tb_port *port, ++ int *consumed_up, ++ int *consumed_down) ++{ ++ struct tb_cm *tcm = tb_priv(tb); ++ struct tb_tunnel *tunnel; ++ int ret; ++ ++ *consumed_up = *consumed_down = 0; ++ ++ /* ++ * Find all DP tunnels that cross the port and reduce ++ * their consumed bandwidth from the available. ++ */ ++ list_for_each_entry(tunnel, &tcm->tunnel_list, list) { ++ int dp_consumed_up, dp_consumed_down; ++ ++ if (tb_tunnel_is_invalid(tunnel)) ++ continue; ++ ++ if (!tb_tunnel_is_dp(tunnel)) + continue; + +- if (tb_is_upstream_port(port)) { +- link_speed = port->sw->link_speed; ++ if (!tb_tunnel_port_on_path(tunnel, port)) ++ continue; ++ ++ /* ++ * Ignore the DP tunnel between src_port and dst_port ++ * because it is the same tunnel and we may be ++ * re-calculating estimated bandwidth. 
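/*
 * Aside (illustrative, not from the upstream patch): concretely, when
 * tb_recalc_estimated_bandwidth_for_group() re-runs this for an existing
 * DP tunnel, @src_port/@dst_port are that tunnel's own adapters. Its
 * current allocation must still be reported as available to it, so
 * counting it as consumed here would understate the estimate by exactly
 * its own share.
 */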
++ */ ++ if (tunnel->src_port == src_port && ++ tunnel->dst_port == dst_port) ++ continue; ++ ++ ret = tb_tunnel_consumed_bandwidth(tunnel, &dp_consumed_up, ++ &dp_consumed_down); ++ if (ret) ++ return ret; ++ ++ *consumed_up += dp_consumed_up; ++ *consumed_down += dp_consumed_down; ++ } ++ ++ return 0; ++} ++ ++static bool tb_asym_supported(struct tb_port *src_port, struct tb_port *dst_port, ++ struct tb_port *port) ++{ ++ bool downstream = tb_port_path_direction_downstream(src_port, dst_port); ++ enum tb_link_width width; ++ ++ if (tb_is_upstream_port(port)) ++ width = downstream ? TB_LINK_WIDTH_ASYM_RX : TB_LINK_WIDTH_ASYM_TX; ++ else ++ width = downstream ? TB_LINK_WIDTH_ASYM_TX : TB_LINK_WIDTH_ASYM_RX; ++ ++ return tb_port_width_supported(port, width); ++} ++ ++/** ++ * tb_maximum_banwidth() - Maximum bandwidth over a single link ++ * @tb: Domain structure ++ * @src_port: Source protocol adapter ++ * @dst_port: Destination protocol adapter ++ * @port: USB4 port the total bandwidth is calculated ++ * @max_up: Maximum upstream bandwidth (Mb/s) ++ * @max_down: Maximum downstream bandwidth (Mb/s) ++ * @include_asym: Include bandwidth if the link is switched from ++ * symmetric to asymmetric ++ * ++ * Returns maximum possible bandwidth in @max_up and @max_down over a ++ * single link at @port. If @include_asym is set then includes the ++ * additional banwdith if the links are transitioned into asymmetric to ++ * direction from @src_port to @dst_port. ++ */ ++static int tb_maximum_bandwidth(struct tb *tb, struct tb_port *src_port, ++ struct tb_port *dst_port, struct tb_port *port, ++ int *max_up, int *max_down, bool include_asym) ++{ ++ bool downstream = tb_port_path_direction_downstream(src_port, dst_port); ++ int link_speed, link_width, up_bw, down_bw; ++ ++ /* ++ * Can include asymmetric, only if it is actually supported by ++ * the lane adapter. ++ */ ++ if (!tb_asym_supported(src_port, dst_port, port)) ++ include_asym = false; ++ ++ if (tb_is_upstream_port(port)) { ++ link_speed = port->sw->link_speed; ++ /* ++ * sw->link_width is from upstream perspective so we use ++ * the opposite for downstream of the host router. ++ */ ++ if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) { ++ up_bw = link_speed * 3 * 1000; ++ down_bw = link_speed * 1 * 1000; ++ } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) { ++ up_bw = link_speed * 1 * 1000; ++ down_bw = link_speed * 3 * 1000; ++ } else if (include_asym) { + /* +- * sw->link_width is from upstream perspective +- * so we use the opposite for downstream of the +- * host router. ++ * The link is symmetric at the moment but we ++ * can switch it to asymmetric as needed. Report ++ * this bandwidth as available (even though it ++ * is not yet enabled). 
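/*
 * Aside (illustrative, not from the upstream patch): with Gen 4 numbers
 * (link_speed == 40) the branches below report 40 * 2 * 1000 = 80000 Mb/s
 * each way for a symmetric link, and 40 * 3 * 1000 = 120000 Mb/s in the
 * heavy direction with 40 * 1 * 1000 = 40000 Mb/s in the other for an
 * asymmetric (or asymmetric-capable, with @include_asym) link, all before
 * the 10% guard band.
 */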
+ */ +- if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) { +- up_bw = link_speed * 3 * 1000; +- down_bw = link_speed * 1 * 1000; +- } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) { ++ if (downstream) { + up_bw = link_speed * 1 * 1000; + down_bw = link_speed * 3 * 1000; + } else { +- up_bw = link_speed * port->sw->link_width * 1000; +- down_bw = up_bw; ++ up_bw = link_speed * 3 * 1000; ++ down_bw = link_speed * 1 * 1000; + } + } else { +- link_speed = tb_port_get_link_speed(port); +- if (link_speed < 0) +- return link_speed; +- +- link_width = tb_port_get_link_width(port); +- if (link_width < 0) +- return link_width; +- +- if (link_width == TB_LINK_WIDTH_ASYM_TX) { ++ up_bw = link_speed * port->sw->link_width * 1000; ++ down_bw = up_bw; ++ } ++ } else { ++ link_speed = tb_port_get_link_speed(port); ++ if (link_speed < 0) ++ return link_speed; ++ ++ link_width = tb_port_get_link_width(port); ++ if (link_width < 0) ++ return link_width; ++ ++ if (link_width == TB_LINK_WIDTH_ASYM_TX) { ++ up_bw = link_speed * 1 * 1000; ++ down_bw = link_speed * 3 * 1000; ++ } else if (link_width == TB_LINK_WIDTH_ASYM_RX) { ++ up_bw = link_speed * 3 * 1000; ++ down_bw = link_speed * 1 * 1000; ++ } else if (include_asym) { ++ /* ++ * The link is symmetric at the moment but we ++ * can switch it to asymmetric as needed. Report ++ * this bandwidth as available (even though it ++ * is not yet enabled). ++ */ ++ if (downstream) { + up_bw = link_speed * 1 * 1000; + down_bw = link_speed * 3 * 1000; +- } else if (link_width == TB_LINK_WIDTH_ASYM_RX) { ++ } else { + up_bw = link_speed * 3 * 1000; + down_bw = link_speed * 1 * 1000; +- } else { +- up_bw = link_speed * link_width * 1000; +- down_bw = up_bw; + } ++ } else { ++ up_bw = link_speed * link_width * 1000; ++ down_bw = up_bw; + } ++ } + +- /* Leave 10% guard band */ +- up_bw -= up_bw / 10; +- down_bw -= down_bw / 10; +- +- tb_port_dbg(port, "link total bandwidth %d/%d Mb/s\n", up_bw, +- down_bw); ++ /* Leave 10% guard band */ ++ *max_up = up_bw - up_bw / 10; ++ *max_down = down_bw - down_bw / 10; + +- /* +- * Find all DP tunnels that cross the port and reduce +- * their consumed bandwidth from the available. +- */ +- list_for_each_entry(tunnel, &tcm->tunnel_list, list) { +- int dp_consumed_up, dp_consumed_down; ++ tb_port_dbg(port, "link maximum bandwidth %d/%d Mb/s\n", *max_up, *max_down); ++ return 0; ++} + +- if (tb_tunnel_is_invalid(tunnel)) +- continue; ++/** ++ * tb_available_bandwidth() - Available bandwidth for tunneling ++ * @tb: Domain structure ++ * @src_port: Source protocol adapter ++ * @dst_port: Destination protocol adapter ++ * @available_up: Available bandwidth upstream (Mb/s) ++ * @available_down: Available bandwidth downstream (Mb/s) ++ * @include_asym: Include bandwidth if the link is switched from ++ * symmetric to asymmetric ++ * ++ * Calculates maximum available bandwidth for protocol tunneling between ++ * @src_port and @dst_port at the moment. This is minimum of maximum ++ * link bandwidth across all links reduced by currently consumed ++ * bandwidth on that link. ++ * ++ * If @include_asym is true then includes also bandwidth that can be ++ * added when the links are transitioned into asymmetric (but does not ++ * transition the links). 
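/*
 * Aside (illustrative, not from the upstream patch): the net effect per
 * USB4 link on the path is
 *
 *   available = min(available,
 *                   max_link_bw - usb3_pcie_consumed - dp_consumed)
 *
 * starting from the 120 Gb/s theoretical cap and clamped to >= 0 at the
 * end of the function.
 */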
++ */ ++static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port, ++ struct tb_port *dst_port, int *available_up, ++ int *available_down, bool include_asym) ++{ ++ struct tb_port *port; ++ int ret; + +- if (!tb_tunnel_is_dp(tunnel)) +- continue; ++ /* Maximum possible bandwidth asymmetric Gen 4 link is 120 Gb/s */ ++ *available_up = *available_down = 120000; + +- if (!tb_tunnel_port_on_path(tunnel, port)) +- continue; ++ /* Find the minimum available bandwidth over all links */ ++ tb_for_each_port_on_path(src_port, dst_port, port) { ++ int max_up, max_down, consumed_up, consumed_down; + +- /* +- * Ignore the DP tunnel between src_port and +- * dst_port because it is the same tunnel and we +- * may be re-calculating estimated bandwidth. +- */ +- if (tunnel->src_port == src_port && +- tunnel->dst_port == dst_port) +- continue; ++ if (!tb_port_is_null(port)) ++ continue; + +- ret = tb_tunnel_consumed_bandwidth(tunnel, +- &dp_consumed_up, +- &dp_consumed_down); +- if (ret) +- return ret; ++ ret = tb_maximum_bandwidth(tb, src_port, dst_port, port, ++ &max_up, &max_down, include_asym); ++ if (ret) ++ return ret; + +- up_bw -= dp_consumed_up; +- down_bw -= dp_consumed_down; +- } ++ ret = tb_consumed_usb3_pcie_bandwidth(tb, src_port, dst_port, ++ port, &consumed_up, ++ &consumed_down); ++ if (ret) ++ return ret; ++ max_up -= consumed_up; ++ max_down -= consumed_down; + +- /* +- * If USB3 is tunneled from the host router down to the +- * branch leading to port we need to take USB3 consumed +- * bandwidth into account regardless whether it actually +- * crosses the port. +- */ +- up_bw -= usb3_consumed_up; +- down_bw -= usb3_consumed_down; ++ ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, port, ++ &consumed_up, &consumed_down); ++ if (ret) ++ return ret; ++ max_up -= consumed_up; ++ max_down -= consumed_down; + +- if (up_bw < *available_up) +- *available_up = up_bw; +- if (down_bw < *available_down) +- *available_down = down_bw; ++ if (max_up < *available_up) ++ *available_up = max_up; ++ if (max_down < *available_down) ++ *available_down = max_down; + } + + if (*available_up < 0) +@@ -736,7 +938,7 @@ static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port, + * That determines the whole USB3 bandwidth for this branch. 
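/*
 * Aside (illustrative, not from the upstream patch): note the
 * @include_asym convention in this series. The USB3 sizing paths here and
 * in tb_tunnel_usb3() pass false, since USB3 alone never triggers an
 * asymmetric transition, while the DP paths (tb_tunnel_one_dp(),
 * tb_recalc_estimated_bandwidth_for_group(), tb_alloc_dp_bandwidth())
 * pass true so that bandwidth reachable by going asymmetric is offered to
 * DisplayPort.
 */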
+ */ + ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port, +- &available_up, &available_down); ++ &available_up, &available_down, false); + if (ret) { + tb_warn(tb, "failed to calculate available bandwidth\n"); + return; +@@ -794,8 +996,8 @@ static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw) + return ret; + } + +- ret = tb_available_bandwidth(tb, down, up, &available_up, +- &available_down); ++ ret = tb_available_bandwidth(tb, down, up, &available_up, &available_down, ++ false); + if (ret) + goto err_reclaim; + +@@ -856,6 +1058,225 @@ static int tb_create_usb3_tunnels(struct tb_switch *sw) + return 0; + } + ++/** ++ * tb_configure_asym() - Transition links to asymmetric if needed ++ * @tb: Domain structure ++ * @src_port: Source adapter to start the transition ++ * @dst_port: Destination adapter ++ * @requested_up: Additional bandwidth (Mb/s) required upstream ++ * @requested_down: Additional bandwidth (Mb/s) required downstream ++ * ++ * Transition links between @src_port and @dst_port into asymmetric, with ++ * three lanes in the direction from @src_port towards @dst_port and one lane ++ * in the opposite direction, if the bandwidth requirements ++ * (requested + currently consumed) on that link exceed @asym_threshold. ++ * ++ * Must be called with available >= requested over all links. ++ */ ++static int tb_configure_asym(struct tb *tb, struct tb_port *src_port, ++ struct tb_port *dst_port, int requested_up, ++ int requested_down) ++{ ++ struct tb_switch *sw; ++ bool clx, downstream; ++ struct tb_port *up; ++ int ret = 0; ++ ++ if (!asym_threshold) ++ return 0; ++ ++ /* Disable CL states before doing any transitions */ ++ downstream = tb_port_path_direction_downstream(src_port, dst_port); ++ /* Pick up router deepest in the hierarchy */ ++ if (downstream) ++ sw = dst_port->sw; ++ else ++ sw = src_port->sw; ++ ++ clx = tb_disable_clx(sw); ++ ++ tb_for_each_upstream_port_on_path(src_port, dst_port, up) { ++ int consumed_up, consumed_down; ++ enum tb_link_width width; ++ ++ ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up, ++ &consumed_up, &consumed_down); ++ if (ret) ++ break; ++ ++ if (downstream) { ++ /* ++ * Downstream so make sure upstream is within the 36G ++ * (40G - guard band 10%), and the requested is above ++ * what the threshold is. ++ */ ++ if (consumed_up + requested_up >= TB_ASYM_MIN) { ++ ret = -ENOBUFS; ++ break; ++ } ++ /* Does consumed + requested exceed the threshold */ ++ if (consumed_down + requested_down < asym_threshold) ++ continue; ++ ++ width = TB_LINK_WIDTH_ASYM_RX; ++ } else { ++ /* Upstream, the opposite of above */ ++ if (consumed_down + requested_down >= TB_ASYM_MIN) { ++ ret = -ENOBUFS; ++ break; ++ } ++ if (consumed_up + requested_up < asym_threshold) ++ continue; ++ ++ width = TB_LINK_WIDTH_ASYM_TX; ++ } ++ ++ if (up->sw->link_width == width) ++ continue; ++ ++ if (!tb_port_width_supported(up, width)) ++ continue; ++ ++ tb_sw_dbg(up->sw, "configuring asymmetric link\n"); ++ ++ /* ++ * Here requested + consumed > threshold so we need to ++ * transtion the link into asymmetric now. 
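/*
 * Aside (illustrative, not from the upstream patch): the surrounding
 * function brackets the width change with the new boolean return of
 * tb_disable_clx(); a minimal sketch of the idiom with a hypothetical
 * callback:
 */
static int example_with_clx_paused(struct tb_switch *sw,
				   int (*transition)(struct tb_switch *))
{
	/* CL states must be off while lane widths change */
	bool clx = tb_disable_clx(sw);
	int ret = transition(sw);

	/* Re-enable only what this function itself disabled */
	if (clx)
		tb_enable_clx(sw);
	return ret;
}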
++ */ ++ ret = tb_switch_set_link_width(up->sw, width); ++ if (ret) { ++ tb_sw_warn(up->sw, "failed to set link width\n"); ++ break; ++ } ++ } ++ ++ /* Re-enable CL states if they were previosly enabled */ ++ if (clx) ++ tb_enable_clx(sw); ++ ++ return ret; ++} ++ ++/** ++ * tb_configure_sym() - Transition links to symmetric if possible ++ * @tb: Domain structure ++ * @src_port: Source adapter to start the transition ++ * @dst_port: Destination adapter ++ * @requested_up: New lower bandwidth request upstream (Mb/s) ++ * @requested_down: New lower bandwidth request downstream (Mb/s) ++ * ++ * Goes over each link from @src_port to @dst_port and tries to ++ * transition the link to symmetric if the currently consumed bandwidth ++ * allows. ++ */ ++static int tb_configure_sym(struct tb *tb, struct tb_port *src_port, ++ struct tb_port *dst_port, int requested_up, ++ int requested_down) ++{ ++ struct tb_switch *sw; ++ bool clx, downstream; ++ struct tb_port *up; ++ int ret = 0; ++ ++ if (!asym_threshold) ++ return 0; ++ ++ /* Disable CL states before doing any transitions */ ++ downstream = tb_port_path_direction_downstream(src_port, dst_port); ++ /* Pick up router deepest in the hierarchy */ ++ if (downstream) ++ sw = dst_port->sw; ++ else ++ sw = src_port->sw; ++ ++ clx = tb_disable_clx(sw); ++ ++ tb_for_each_upstream_port_on_path(src_port, dst_port, up) { ++ int consumed_up, consumed_down; ++ ++ /* Already symmetric */ ++ if (up->sw->link_width <= TB_LINK_WIDTH_DUAL) ++ continue; ++ /* Unplugged, no need to switch */ ++ if (up->sw->is_unplugged) ++ continue; ++ ++ ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up, ++ &consumed_up, &consumed_down); ++ if (ret) ++ break; ++ ++ if (downstream) { ++ /* ++ * Downstream so we want the consumed_down < threshold. ++ * Upstream traffic should be less than 36G (40G ++ * guard band 10%) as the link was configured asymmetric ++ * already. ++ */ ++ if (consumed_down + requested_down >= asym_threshold) ++ continue; ++ } else { ++ if (consumed_up + requested_up >= asym_threshold) ++ continue; ++ } ++ ++ if (up->sw->link_width == TB_LINK_WIDTH_DUAL) ++ continue; ++ ++ tb_sw_dbg(up->sw, "configuring symmetric link\n"); ++ ++ ret = tb_switch_set_link_width(up->sw, TB_LINK_WIDTH_DUAL); ++ if (ret) { ++ tb_sw_warn(up->sw, "failed to set link width\n"); ++ break; ++ } ++ } ++ ++ /* Re-enable CL states if they were previosly enabled */ ++ if (clx) ++ tb_enable_clx(sw); ++ ++ return ret; ++} ++ ++static void tb_configure_link(struct tb_port *down, struct tb_port *up, ++ struct tb_switch *sw) ++{ ++ struct tb *tb = sw->tb; ++ ++ /* Link the routers using both links if available */ ++ down->remote = up; ++ up->remote = down; ++ if (down->dual_link_port && up->dual_link_port) { ++ down->dual_link_port->remote = up->dual_link_port; ++ up->dual_link_port->remote = down->dual_link_port; ++ } ++ ++ /* ++ * Enable lane bonding if the link is currently two single lane ++ * links. ++ */ ++ if (sw->link_width < TB_LINK_WIDTH_DUAL) ++ tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL); ++ ++ /* ++ * Device router that comes up as symmetric link is ++ * connected deeper in the hierarchy, we transition the links ++ * above into symmetric if bandwidth allows. 
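/*
 * Aside (illustrative, not from the upstream patch): together with
 * tb_configure_asym() this gives hysteresis around the default 45000 Mb/s
 * threshold. A hypothetical DP stream consuming 35 Gb/s keeps the link
 * symmetric (35000 < 45000); one needing ~52 Gb/s pushes it asymmetric;
 * and when that tunnel is torn down, tb_deactivate_and_free_tunnel()
 * calls tb_configure_sym() so the link drops back to the symmetric width.
 */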
++ */ ++ if (tb_switch_depth(sw) > 1 && ++ tb_port_get_link_generation(up) >= 4 && ++ up->sw->link_width == TB_LINK_WIDTH_DUAL) { ++ struct tb_port *host_port; ++ ++ host_port = tb_port_at(tb_route(sw), tb->root_switch); ++ tb_configure_sym(tb, host_port, up, 0, 0); ++ } ++ ++ /* Set the link configured */ ++ tb_switch_configure_link(sw); ++} ++ + static void tb_scan_port(struct tb_port *port); + + /* +@@ -964,19 +1385,9 @@ static void tb_scan_port(struct tb_port *port) + goto out_rpm_put; + } + +- /* Link the switches using both links if available */ + upstream_port = tb_upstream_port(sw); +- port->remote = upstream_port; +- upstream_port->remote = port; +- if (port->dual_link_port && upstream_port->dual_link_port) { +- port->dual_link_port->remote = upstream_port->dual_link_port; +- upstream_port->dual_link_port->remote = port->dual_link_port; +- } ++ tb_configure_link(port, upstream_port, sw); + +- /* Enable lane bonding if supported */ +- tb_switch_lane_bonding_enable(sw); +- /* Set the link configured */ +- tb_switch_configure_link(sw); + /* + * CL0s and CL1 are enabled and supported together. + * Silently ignore CLx enabling in case CLx is not supported. +@@ -1040,6 +1451,11 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel) + * deallocated properly. + */ + tb_switch_dealloc_dp_resource(src_port->sw, src_port); ++ /* ++ * If bandwidth on a link is < asym_threshold ++ * transition the link to symmetric. ++ */ ++ tb_configure_sym(tb, src_port, dst_port, 0, 0); + /* Now we can allow the domain to runtime suspend again */ + pm_runtime_mark_last_busy(&dst_port->sw->dev); + pm_runtime_put_autosuspend(&dst_port->sw->dev); +@@ -1092,7 +1508,8 @@ static void tb_free_unplugged_children(struct tb_switch *sw) + tb_retimer_remove_all(port); + tb_remove_dp_resources(port->remote->sw); + tb_switch_unconfigure_link(port->remote->sw); +- tb_switch_lane_bonding_disable(port->remote->sw); ++ tb_switch_set_link_width(port->remote->sw, ++ TB_LINK_WIDTH_SINGLE); + tb_switch_remove(port->remote->sw); + port->remote = NULL; + if (port->dual_link_port) +@@ -1196,7 +1613,7 @@ tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group) + + out = tunnel->dst_port; + ret = tb_available_bandwidth(tb, in, out, &estimated_up, +- &estimated_down); ++ &estimated_down, true); + if (ret) { + tb_port_warn(in, + "failed to re-calculate estimated bandwidth\n"); +@@ -1212,7 +1629,7 @@ tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group) + tb_port_dbg(in, "re-calculated estimated bandwidth %u/%u Mb/s\n", + estimated_up, estimated_down); + +- if (in->sw->config.depth < out->sw->config.depth) ++ if (tb_port_path_direction_downstream(in, out)) + estimated_bw = estimated_down; + else + estimated_bw = estimated_up; +@@ -1282,53 +1699,14 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in) + return NULL; + } + +-static void tb_tunnel_dp(struct tb *tb) ++static bool tb_tunnel_one_dp(struct tb *tb, struct tb_port *in, ++ struct tb_port *out) + { + int available_up, available_down, ret, link_nr; + struct tb_cm *tcm = tb_priv(tb); +- struct tb_port *port, *in, *out; ++ int consumed_up, consumed_down; + struct tb_tunnel *tunnel; + +- if (!tb_acpi_may_tunnel_dp()) { +- tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n"); +- return; +- } +- +- /* +- * Find pair of inactive DP IN and DP OUT adapters and then +- * establish a DP tunnel between them. 
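/*
 * Aside (illustrative, not from the upstream patch): tb_configure_link()
 * above now owns what tb_scan_port() used to open-code: wiring up
 * ->remote on both lane adapters, bonding the link if it came up as two
 * single lanes, letting the Gen 4 links above a deeper device router fall
 * back to symmetric, and finally marking the link configured.
 */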
+- */ +- tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n"); +- +- in = NULL; +- out = NULL; +- list_for_each_entry(port, &tcm->dp_resources, list) { +- if (!tb_port_is_dpin(port)) +- continue; +- +- if (tb_port_is_enabled(port)) { +- tb_port_dbg(port, "DP IN in use\n"); +- continue; +- } +- +- tb_port_dbg(port, "DP IN available\n"); +- +- out = tb_find_dp_out(tb, port); +- if (out) { +- in = port; +- break; +- } +- } +- +- if (!in) { +- tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n"); +- return; +- } +- if (!out) { +- tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n"); +- return; +- } +- + /* + * This is only applicable to links that are not bonded (so + * when Thunderbolt 1 hardware is involved somewhere in the +@@ -1369,7 +1747,8 @@ static void tb_tunnel_dp(struct tb *tb) + goto err_detach_group; + } + +- ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down); ++ ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down, ++ true); + if (ret) + goto err_reclaim_usb; + +@@ -1388,8 +1767,19 @@ static void tb_tunnel_dp(struct tb *tb) + goto err_free; + } + ++ /* If fail reading tunnel's consumed bandwidth, tear it down */ ++ ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down); ++ if (ret) ++ goto err_deactivate; ++ + list_add_tail(&tunnel->list, &tcm->tunnel_list); ++ + tb_reclaim_usb3_bandwidth(tb, in, out); ++ /* ++ * Transition the links to asymmetric if the consumption exceeds ++ * the threshold. ++ */ ++ tb_configure_asym(tb, in, out, consumed_up, consumed_down); + + /* Update the domain with the new bandwidth estimation */ + tb_recalc_estimated_bandwidth(tb); +@@ -1399,8 +1789,10 @@ static void tb_tunnel_dp(struct tb *tb) + * TMU mode to HiFi for CL0s to work. + */ + tb_increase_tmu_accuracy(tunnel); +- return; ++ return true; + ++err_deactivate: ++ tb_tunnel_deactivate(tunnel); + err_free: + tb_tunnel_free(tunnel); + err_reclaim_usb: +@@ -1414,6 +1806,49 @@ static void tb_tunnel_dp(struct tb *tb) + pm_runtime_put_autosuspend(&out->sw->dev); + pm_runtime_mark_last_busy(&in->sw->dev); + pm_runtime_put_autosuspend(&in->sw->dev); ++ ++ return false; ++} ++ ++static void tb_tunnel_dp(struct tb *tb) ++{ ++ struct tb_cm *tcm = tb_priv(tb); ++ struct tb_port *port, *in, *out; ++ ++ if (!tb_acpi_may_tunnel_dp()) { ++ tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n"); ++ return; ++ } ++ ++ /* ++ * Find pair of inactive DP IN and DP OUT adapters and then ++ * establish a DP tunnel between them. 
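/*
 * Aside (illustrative, not from the upstream patch): tb_tunnel_one_dp()
 * above uses the usual goto-unwind ladder, with labels undoing the setup
 * steps in reverse order (err_deactivate -> err_free -> err_reclaim_usb ->
 * err_detach_group, then the runtime PM puts). The new err_deactivate
 * label exists because the tunnel is already active when the
 * consumed-bandwidth read fails, so it must be deactivated before being
 * freed.
 */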
++ */ ++ tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n"); ++ ++ in = NULL; ++ out = NULL; ++ list_for_each_entry(port, &tcm->dp_resources, list) { ++ if (!tb_port_is_dpin(port)) ++ continue; ++ ++ if (tb_port_is_enabled(port)) { ++ tb_port_dbg(port, "DP IN in use\n"); ++ continue; ++ } ++ ++ in = port; ++ tb_port_dbg(in, "DP IN available\n"); ++ ++ out = tb_find_dp_out(tb, port); ++ if (out) ++ tb_tunnel_one_dp(tb, in, out); ++ else ++ tb_port_dbg(in, "no suitable DP OUT adapter available, not tunneling\n"); ++ } ++ ++ if (!in) ++ tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n"); + } + + static void tb_enter_redrive(struct tb_port *port) +@@ -1748,7 +2183,8 @@ static void tb_handle_hotplug(struct work_struct *work) + tb_remove_dp_resources(port->remote->sw); + tb_switch_tmu_disable(port->remote->sw); + tb_switch_unconfigure_link(port->remote->sw); +- tb_switch_lane_bonding_disable(port->remote->sw); ++ tb_switch_set_link_width(port->remote->sw, ++ TB_LINK_WIDTH_SINGLE); + tb_switch_remove(port->remote->sw); + port->remote = NULL; + if (port->dual_link_port) +@@ -1883,6 +2319,11 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up, + + if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) || + (*requested_down >= 0 && requested_down_corrected <= allocated_down)) { ++ /* ++ * If bandwidth on a link is < asym_threshold transition ++ * the link to symmetric. ++ */ ++ tb_configure_sym(tb, in, out, *requested_up, *requested_down); + /* + * If requested bandwidth is less or equal than what is + * currently allocated to that tunnel we simply change +@@ -1908,7 +2349,8 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up, + * are also in the same group but we use the same function here + * that we use with the normal bandwidth allocation). + */ +- ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down); ++ ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down, ++ true); + if (ret) + goto reclaim; + +@@ -1917,8 +2359,23 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up, + + if ((*requested_up >= 0 && available_up >= requested_up_corrected) || + (*requested_down >= 0 && available_down >= requested_down_corrected)) { ++ /* ++ * If bandwidth on a link is >= asym_threshold ++ * transition the link to asymmetric. 
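/*
 * Aside (illustrative, not from the upstream patch): a behavioural change
 * worth noting here. The old code tunneled only the first DP IN/OUT pair
 * it found and then returned, whereas the loop above keeps walking the
 * dp_resources list and calls tb_tunnel_one_dp() for every available
 * pair, so a single pass can now establish several DP tunnels.
 */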
++ */ ++ ret = tb_configure_asym(tb, in, out, *requested_up, ++ *requested_down); ++ if (ret) { ++ tb_configure_sym(tb, in, out, 0, 0); ++ return ret; ++ } ++ + ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up, + requested_down); ++ if (ret) { ++ tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n"); ++ tb_configure_sym(tb, in, out, 0, 0); ++ } + } else { + ret = -ENOBUFS; + } +@@ -1984,7 +2441,7 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work) + + out = tunnel->dst_port; + +- if (in->sw->config.depth < out->sw->config.depth) { ++ if (tb_port_path_direction_downstream(in, out)) { + requested_up = -1; + requested_down = requested_bw; + } else { +@@ -2240,7 +2697,8 @@ static void tb_restore_children(struct tb_switch *sw) + continue; + + if (port->remote) { +- tb_switch_lane_bonding_enable(port->remote->sw); ++ tb_switch_set_link_width(port->remote->sw, ++ port->remote->sw->link_width); + tb_switch_configure_link(port->remote->sw); + + tb_restore_children(port->remote->sw); +diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h +index 8a75aabb9ce8e5..920dac8a63e1df 100644 +--- a/drivers/thunderbolt/tb.h ++++ b/drivers/thunderbolt/tb.h +@@ -164,11 +164,6 @@ struct tb_switch_tmu { + * switches) you need to have domain lock held. + * + * In USB4 terminology this structure represents a router. +- * +- * Note @link_width is not the same as whether link is bonded or not. +- * For Gen 4 links the link is also bonded when it is asymmetric. The +- * correct way to find out whether the link is bonded or not is to look +- * @bonded field of the upstream port. + */ + struct tb_switch { + struct device dev; +@@ -868,6 +863,15 @@ static inline struct tb_port *tb_switch_downstream_port(struct tb_switch *sw) + return tb_port_at(tb_route(sw), tb_switch_parent(sw)); + } + ++/** ++ * tb_switch_depth() - Returns depth of the connected router ++ * @sw: Router ++ */ ++static inline int tb_switch_depth(const struct tb_switch *sw) ++{ ++ return sw->config.depth; ++} ++ + static inline bool tb_switch_is_light_ridge(const struct tb_switch *sw) + { + return sw->config.vendor_id == PCI_VENDOR_ID_INTEL && +@@ -960,8 +964,7 @@ static inline bool tb_switch_is_icm(const struct tb_switch *sw) + return !sw->config.enabled; + } + +-int tb_switch_lane_bonding_enable(struct tb_switch *sw); +-void tb_switch_lane_bonding_disable(struct tb_switch *sw); ++int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width); + int tb_switch_configure_link(struct tb_switch *sw); + void tb_switch_unconfigure_link(struct tb_switch *sw); + +@@ -1044,6 +1047,21 @@ void tb_port_release_out_hopid(struct tb_port *port, int hopid); + struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end, + struct tb_port *prev); + ++/** ++ * tb_port_path_direction_downstream() - Checks if path directed downstream ++ * @src: Source adapter ++ * @dst: Destination adapter ++ * ++ * Returns %true only if the specified path from source adapter (@src) ++ * to destination adapter (@dst) is directed downstream. 
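/*
 * Aside (illustrative, not from the upstream patch): the host router sits
 * at depth 0 and each hop adds one, so for a DP IN on the host (depth 0)
 * and a DP OUT on a monitor behind a dock (depth 2), 0 < 2 means the path
 * is directed downstream. This helper replaces the open-coded
 * config.depth comparisons converted throughout this patch.
 */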
++ */ ++static inline bool ++tb_port_path_direction_downstream(const struct tb_port *src, ++ const struct tb_port *dst) ++{ ++ return src->sw->config.depth < dst->sw->config.depth; ++} ++ + static inline bool tb_port_use_credit_allocation(const struct tb_port *port) + { + return tb_port_is_null(port) && port->sw->credit_allocation; +@@ -1061,12 +1079,29 @@ static inline bool tb_port_use_credit_allocation(const struct tb_port *port) + for ((p) = tb_next_port_on_path((src), (dst), NULL); (p); \ + (p) = tb_next_port_on_path((src), (dst), (p))) + ++/** ++ * tb_for_each_upstream_port_on_path() - Iterate over each upstreamm port on path ++ * @src: Source port ++ * @dst: Destination port ++ * @p: Port used as iterator ++ * ++ * Walks over each upstream lane adapter on path from @src to @dst. ++ */ ++#define tb_for_each_upstream_port_on_path(src, dst, p) \ ++ for ((p) = tb_next_port_on_path((src), (dst), NULL); (p); \ ++ (p) = tb_next_port_on_path((src), (dst), (p))) \ ++ if (!tb_port_is_null((p)) || !tb_is_upstream_port((p))) {\ ++ continue; \ ++ } else ++ + int tb_port_get_link_speed(struct tb_port *port); ++int tb_port_get_link_generation(struct tb_port *port); + int tb_port_get_link_width(struct tb_port *port); ++bool tb_port_width_supported(struct tb_port *port, unsigned int width); + int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width); + int tb_port_lane_bonding_enable(struct tb_port *port); + void tb_port_lane_bonding_disable(struct tb_port *port); +-int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width_mask, ++int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width, + int timeout_msec); + int tb_port_update_credits(struct tb_port *port); + +@@ -1264,6 +1299,11 @@ int usb4_port_router_online(struct tb_port *port); + int usb4_port_enumerate_retimers(struct tb_port *port); + bool usb4_port_clx_supported(struct tb_port *port); + int usb4_port_margining_caps(struct tb_port *port, u32 *caps); ++ ++bool usb4_port_asym_supported(struct tb_port *port); ++int usb4_port_asym_set_link_width(struct tb_port *port, enum tb_link_width width); ++int usb4_port_asym_start(struct tb_port *port); ++ + int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes, + unsigned int ber_level, bool timing, bool right_high, + u32 *results); +diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h +index 736e28beac119f..4419e274d2b4c6 100644 +--- a/drivers/thunderbolt/tb_regs.h ++++ b/drivers/thunderbolt/tb_regs.h +@@ -348,10 +348,14 @@ struct tb_regs_port_header { + #define LANE_ADP_CS_1 0x01 + #define LANE_ADP_CS_1_TARGET_SPEED_MASK GENMASK(3, 0) + #define LANE_ADP_CS_1_TARGET_SPEED_GEN3 0xc +-#define LANE_ADP_CS_1_TARGET_WIDTH_MASK GENMASK(9, 4) ++#define LANE_ADP_CS_1_TARGET_WIDTH_MASK GENMASK(5, 4) + #define LANE_ADP_CS_1_TARGET_WIDTH_SHIFT 4 + #define LANE_ADP_CS_1_TARGET_WIDTH_SINGLE 0x1 + #define LANE_ADP_CS_1_TARGET_WIDTH_DUAL 0x3 ++#define LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK GENMASK(7, 6) ++#define LANE_ADP_CS_1_TARGET_WIDTH_ASYM_TX 0x1 ++#define LANE_ADP_CS_1_TARGET_WIDTH_ASYM_RX 0x2 ++#define LANE_ADP_CS_1_TARGET_WIDTH_ASYM_DUAL 0x0 + #define LANE_ADP_CS_1_CL0S_ENABLE BIT(10) + #define LANE_ADP_CS_1_CL1_ENABLE BIT(11) + #define LANE_ADP_CS_1_CL2_ENABLE BIT(12) +@@ -384,6 +388,8 @@ struct tb_regs_port_header { + #define PORT_CS_18_WOCS BIT(16) + #define PORT_CS_18_WODS BIT(17) + #define PORT_CS_18_WOU4S BIT(18) ++#define PORT_CS_18_CSA BIT(22) ++#define PORT_CS_18_TIP BIT(24) + #define PORT_CS_19 0x13 + #define PORT_CS_19_DPR 
BIT(0) + #define PORT_CS_19_PC BIT(3) +@@ -391,6 +397,7 @@ struct tb_regs_port_header { + #define PORT_CS_19_WOC BIT(16) + #define PORT_CS_19_WOD BIT(17) + #define PORT_CS_19_WOU4 BIT(18) ++#define PORT_CS_19_START_ASYM BIT(24) + + /* Display Port adapter registers */ + #define ADP_DP_CS_0 0x00 +diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c +index a6810fb368600f..8aec678d80d357 100644 +--- a/drivers/thunderbolt/tunnel.c ++++ b/drivers/thunderbolt/tunnel.c +@@ -21,12 +21,18 @@ + #define TB_PCI_PATH_DOWN 0 + #define TB_PCI_PATH_UP 1 + ++#define TB_PCI_PRIORITY 3 ++#define TB_PCI_WEIGHT 1 ++ + /* USB3 adapters use always HopID of 8 for both directions */ + #define TB_USB3_HOPID 8 + + #define TB_USB3_PATH_DOWN 0 + #define TB_USB3_PATH_UP 1 + ++#define TB_USB3_PRIORITY 3 ++#define TB_USB3_WEIGHT 2 ++ + /* DP adapters use HopID 8 for AUX and 9 for Video */ + #define TB_DP_AUX_TX_HOPID 8 + #define TB_DP_AUX_RX_HOPID 8 +@@ -36,6 +42,12 @@ + #define TB_DP_AUX_PATH_OUT 1 + #define TB_DP_AUX_PATH_IN 2 + ++#define TB_DP_VIDEO_PRIORITY 1 ++#define TB_DP_VIDEO_WEIGHT 1 ++ ++#define TB_DP_AUX_PRIORITY 2 ++#define TB_DP_AUX_WEIGHT 1 ++ + /* Minimum number of credits needed for PCIe path */ + #define TB_MIN_PCIE_CREDITS 6U + /* +@@ -46,6 +58,18 @@ + /* Minimum number of credits for DMA path */ + #define TB_MIN_DMA_CREDITS 1 + ++#define TB_DMA_PRIORITY 5 ++#define TB_DMA_WEIGHT 1 ++ ++/* ++ * Reserve additional bandwidth for USB 3.x and PCIe bulk traffic ++ * according to USB4 v2 Connection Manager guide. This ends up reserving ++ * 1500 Mb/s for PCIe and 3000 Mb/s for USB 3.x taking weights into ++ * account. ++ */ ++#define USB4_V2_PCI_MIN_BANDWIDTH (1500 * TB_PCI_WEIGHT) ++#define USB4_V2_USB3_MIN_BANDWIDTH (1500 * TB_USB3_WEIGHT) ++ + static unsigned int dma_credits = TB_DMA_CREDITS; + module_param(dma_credits, uint, 0444); + MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: " +@@ -58,27 +82,6 @@ MODULE_PARM_DESC(bw_alloc_mode, + + static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" }; + +-#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \ +- do { \ +- struct tb_tunnel *__tunnel = (tunnel); \ +- level(__tunnel->tb, "%llx:%u <-> %llx:%u (%s): " fmt, \ +- tb_route(__tunnel->src_port->sw), \ +- __tunnel->src_port->port, \ +- tb_route(__tunnel->dst_port->sw), \ +- __tunnel->dst_port->port, \ +- tb_tunnel_names[__tunnel->type], \ +- ## arg); \ +- } while (0) +- +-#define tb_tunnel_WARN(tunnel, fmt, arg...) \ +- __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg) +-#define tb_tunnel_warn(tunnel, fmt, arg...) \ +- __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg) +-#define tb_tunnel_info(tunnel, fmt, arg...) \ +- __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg) +-#define tb_tunnel_dbg(tunnel, fmt, arg...) 
\ +- __TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg) +- + static inline unsigned int tb_usable_credits(const struct tb_port *port) + { + return port->total_credits - port->ctl_credits; +@@ -156,11 +159,11 @@ static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths, + + static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable) + { ++ struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw); + int ret; + + /* Only supported of both routers are at least USB4 v2 */ +- if (usb4_switch_version(tunnel->src_port->sw) < 2 || +- usb4_switch_version(tunnel->dst_port->sw) < 2) ++ if (tb_port_get_link_generation(port) < 4) + return 0; + + ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable); +@@ -234,8 +237,8 @@ static int tb_pci_init_path(struct tb_path *path) + path->egress_shared_buffer = TB_PATH_NONE; + path->ingress_fc_enable = TB_PATH_ALL; + path->ingress_shared_buffer = TB_PATH_NONE; +- path->priority = 3; +- path->weight = 1; ++ path->priority = TB_PCI_PRIORITY; ++ path->weight = TB_PCI_WEIGHT; + path->drop_packages = 0; + + tb_path_for_each_hop(path, hop) { +@@ -376,6 +379,51 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up, + return NULL; + } + ++/** ++ * tb_tunnel_reserved_pci() - Amount of bandwidth to reserve for PCIe ++ * @port: Lane 0 adapter ++ * @reserved_up: Upstream bandwidth in Mb/s to reserve ++ * @reserved_down: Downstream bandwidth in Mb/s to reserve ++ * ++ * Can be called to any connected lane 0 adapter to find out how much ++ * bandwidth needs to be left in reserve for possible PCIe bulk traffic. ++ * Returns true if there is something to be reserved and writes the ++ * amount to @reserved_down/@reserved_up. Otherwise returns false and ++ * does not touch the parameters. 
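/*
 * Aside (illustrative, not from the upstream patch): the reserved amounts
 * come from the weighted minimums above: USB4_V2_PCI_MIN_BANDWIDTH =
 * 1500 * TB_PCI_WEIGHT = 1500 Mb/s and USB4_V2_USB3_MIN_BANDWIDTH =
 * 1500 * TB_USB3_WEIGHT = 3000 Mb/s, matching the "1500 Mb/s for PCIe and
 * 3000 Mb/s for USB 3.x" figures in the comment. A sketch of the caller
 * side, mirroring tb_consumed_usb3_pcie_bandwidth():
 */
static void example_add_pci_reservation(struct tb_port *port,
					int *consumed_up, int *consumed_down)
{
	int pci_up, pci_down;

	/* The out parameters are written only when this returns true */
	if (tb_tunnel_reserved_pci(port, &pci_up, &pci_down)) {
		*consumed_up += pci_up;
		*consumed_down += pci_down;
	}
}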
++ */ ++bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up, ++ int *reserved_down) ++{ ++ if (WARN_ON_ONCE(!port->remote)) ++ return false; ++ ++ if (!tb_acpi_may_tunnel_pcie()) ++ return false; ++ ++ if (tb_port_get_link_generation(port) < 4) ++ return false; ++ ++ /* Must have PCIe adapters */ ++ if (tb_is_upstream_port(port)) { ++ if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_UP)) ++ return false; ++ if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_DOWN)) ++ return false; ++ } else { ++ if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_DOWN)) ++ return false; ++ if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_UP)) ++ return false; ++ } ++ ++ *reserved_up = USB4_V2_PCI_MIN_BANDWIDTH; ++ *reserved_down = USB4_V2_PCI_MIN_BANDWIDTH; ++ ++ tb_port_dbg(port, "reserving %u/%u Mb/s for PCIe\n", *reserved_up, ++ *reserved_down); ++ return true; ++} ++ + static bool tb_dp_is_usb4(const struct tb_switch *sw) + { + /* Titan Ridge DP adapters need the same treatment as USB4 */ +@@ -614,8 +662,9 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel) + + in_rate = tb_dp_cap_get_rate(in_dp_cap); + in_lanes = tb_dp_cap_get_lanes(in_dp_cap); +- tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", +- in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes)); ++ tb_tunnel_dbg(tunnel, ++ "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", ++ in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes)); + + /* + * If the tunnel bandwidth is limited (max_bw is set) then see +@@ -624,10 +673,11 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel) + out_rate = tb_dp_cap_get_rate(out_dp_cap); + out_lanes = tb_dp_cap_get_lanes(out_dp_cap); + bw = tb_dp_bandwidth(out_rate, out_lanes); +- tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", +- out_rate, out_lanes, bw); ++ tb_tunnel_dbg(tunnel, ++ "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", ++ out_rate, out_lanes, bw); + +- if (in->sw->config.depth < out->sw->config.depth) ++ if (tb_port_path_direction_downstream(in, out)) + max_bw = tunnel->max_down; + else + max_bw = tunnel->max_up; +@@ -639,13 +689,14 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel) + out_rate, out_lanes, &new_rate, + &new_lanes); + if (ret) { +- tb_port_info(out, "not enough bandwidth for DP tunnel\n"); ++ tb_tunnel_info(tunnel, "not enough bandwidth\n"); + return ret; + } + + new_bw = tb_dp_bandwidth(new_rate, new_lanes); +- tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n", +- new_rate, new_lanes, new_bw); ++ tb_tunnel_dbg(tunnel, ++ "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n", ++ new_rate, new_lanes, new_bw); + + /* + * Set new rate and number of lanes before writing it to +@@ -662,7 +713,7 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel) + */ + if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) { + out_dp_cap |= DP_COMMON_CAP_LTTPR_NS; +- tb_port_dbg(out, "disabling LTTPR\n"); ++ tb_tunnel_dbg(tunnel, "disabling LTTPR\n"); + } + + return tb_port_write(in, &out_dp_cap, TB_CFG_PORT, +@@ -712,8 +763,8 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel) + lanes = min(in_lanes, out_lanes); + tmp = tb_dp_bandwidth(rate, lanes); + +- tb_port_dbg(in, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n", rate, +- lanes, tmp); ++ tb_tunnel_dbg(tunnel, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n", ++ rate, lanes, tmp); + + ret = usb4_dp_port_set_nrd(in, rate, lanes); + if (ret) +@@ -728,15 +779,15 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct 
tb_tunnel *tunnel) + rate = min(in_rate, out_rate); + tmp = tb_dp_bandwidth(rate, lanes); + +- tb_port_dbg(in, +- "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n", +- rate, lanes, tmp); ++ tb_tunnel_dbg(tunnel, ++ "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n", ++ rate, lanes, tmp); + + for (granularity = 250; tmp / granularity > 255 && granularity <= 1000; + granularity *= 2) + ; + +- tb_port_dbg(in, "granularity %d Mb/s\n", granularity); ++ tb_tunnel_dbg(tunnel, "granularity %d Mb/s\n", granularity); + + /* + * Returns -EINVAL if granularity above is outside of the +@@ -751,12 +802,12 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel) + * max_up/down fields. For discovery we just read what the + * estimation was set to. + */ +- if (in->sw->config.depth < out->sw->config.depth) ++ if (tb_port_path_direction_downstream(in, out)) + estimated_bw = tunnel->max_down; + else + estimated_bw = tunnel->max_up; + +- tb_port_dbg(in, "estimated bandwidth %d Mb/s\n", estimated_bw); ++ tb_tunnel_dbg(tunnel, "estimated bandwidth %d Mb/s\n", estimated_bw); + + ret = usb4_dp_port_set_estimated_bandwidth(in, estimated_bw); + if (ret) +@@ -767,7 +818,7 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel) + if (ret) + return ret; + +- tb_port_dbg(in, "bandwidth allocation mode enabled\n"); ++ tb_tunnel_dbg(tunnel, "bandwidth allocation mode enabled\n"); + return 0; + } + +@@ -788,7 +839,7 @@ static int tb_dp_init(struct tb_tunnel *tunnel) + if (!usb4_dp_port_bandwidth_mode_supported(in)) + return 0; + +- tb_port_dbg(in, "bandwidth allocation mode supported\n"); ++ tb_tunnel_dbg(tunnel, "bandwidth allocation mode supported\n"); + + ret = usb4_dp_port_set_cm_id(in, tb->index); + if (ret) +@@ -805,7 +856,7 @@ static void tb_dp_deinit(struct tb_tunnel *tunnel) + return; + if (usb4_dp_port_bandwidth_mode_enabled(in)) { + usb4_dp_port_set_cm_bandwidth_mode_supported(in, false); +- tb_port_dbg(in, "bandwidth allocation mode disabled\n"); ++ tb_tunnel_dbg(tunnel, "bandwidth allocation mode disabled\n"); + } + } + +@@ -921,10 +972,7 @@ static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel, + if (allocated_bw == max_bw) + allocated_bw = ret; + +- tb_port_dbg(in, "consumed bandwidth through allocation mode %d Mb/s\n", +- allocated_bw); +- +- if (in->sw->config.depth < out->sw->config.depth) { ++ if (tb_port_path_direction_downstream(in, out)) { + *consumed_up = 0; + *consumed_down = allocated_bw; + } else { +@@ -959,7 +1007,7 @@ static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up + if (allocated_bw == max_bw) + allocated_bw = ret; + +- if (in->sw->config.depth < out->sw->config.depth) { ++ if (tb_port_path_direction_downstream(in, out)) { + *allocated_up = 0; + *allocated_down = allocated_bw; + } else { +@@ -987,7 +1035,7 @@ static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up, + if (ret < 0) + return ret; + +- if (in->sw->config.depth < out->sw->config.depth) { ++ if (tb_port_path_direction_downstream(in, out)) { + tmp = min(*alloc_down, max_bw); + ret = usb4_dp_port_allocate_bandwidth(in, tmp); + if (ret) +@@ -1006,9 +1054,6 @@ static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up, + /* Now we can use BW mode registers to figure out the bandwidth */ + /* TODO: need to handle discovery too */ + tunnel->bw_mode = true; +- +- tb_port_dbg(in, "allocated bandwidth through allocation mode %d Mb/s\n", +- tmp); + return 0; + } + +@@ -1035,8 +1080,7 @@ 
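/*
 * Aside (illustrative, not from the upstream patch): the granularity loop
 * in tb_dp_bandwidth_alloc_mode_enable() above sizes the unit so that the
 * maximum bandwidth fits an 8-bit field. With a hypothetical tmp of
 * 77760 Mb/s: 77760 / 250 = 311 > 255, so granularity doubles to 500;
 * 77760 / 500 = 155 <= 255, so the loop stops at 500 Mb/s. The loop stops
 * doubling once granularity passes 1000, and an out-of-range result makes
 * usb4_dp_port_set_granularity() return -EINVAL as noted above.
 */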
static int tb_dp_read_dprx(struct tb_tunnel *tunnel, u32 *rate, u32 *lanes, + *rate = tb_dp_cap_get_rate(val); + *lanes = tb_dp_cap_get_lanes(val); + +- tb_port_dbg(in, "consumed bandwidth through DPRX %d Mb/s\n", +- tb_dp_bandwidth(*rate, *lanes)); ++ tb_tunnel_dbg(tunnel, "DPRX read done\n"); + return 0; + } + usleep_range(100, 150); +@@ -1073,9 +1117,6 @@ static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate, + + *rate = tb_dp_cap_get_rate(val); + *lanes = tb_dp_cap_get_lanes(val); +- +- tb_port_dbg(in, "bandwidth from %#x capability %d Mb/s\n", cap, +- tb_dp_bandwidth(*rate, *lanes)); + return 0; + } + +@@ -1092,7 +1133,7 @@ static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up, + if (ret < 0) + return ret; + +- if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) { ++ if (tb_port_path_direction_downstream(in, tunnel->dst_port)) { + *max_up = 0; + *max_down = ret; + } else { +@@ -1150,7 +1191,7 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up, + return 0; + } + +- if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) { ++ if (tb_port_path_direction_downstream(in, tunnel->dst_port)) { + *consumed_up = 0; + *consumed_down = tb_dp_bandwidth(rate, lanes); + } else { +@@ -1180,8 +1221,8 @@ static void tb_dp_init_aux_path(struct tb_path *path) + path->egress_shared_buffer = TB_PATH_NONE; + path->ingress_fc_enable = TB_PATH_ALL; + path->ingress_shared_buffer = TB_PATH_NONE; +- path->priority = 2; +- path->weight = 1; ++ path->priority = TB_DP_AUX_PRIORITY; ++ path->weight = TB_DP_AUX_WEIGHT; + + tb_path_for_each_hop(path, hop) + tb_dp_init_aux_credits(hop); +@@ -1224,8 +1265,8 @@ static int tb_dp_init_video_path(struct tb_path *path) + path->egress_shared_buffer = TB_PATH_NONE; + path->ingress_fc_enable = TB_PATH_NONE; + path->ingress_shared_buffer = TB_PATH_NONE; +- path->priority = 1; +- path->weight = 1; ++ path->priority = TB_DP_VIDEO_PRIORITY; ++ path->weight = TB_DP_VIDEO_WEIGHT; + + tb_path_for_each_hop(path, hop) { + int ret; +@@ -1253,8 +1294,9 @@ static void tb_dp_dump(struct tb_tunnel *tunnel) + rate = tb_dp_cap_get_rate(dp_cap); + lanes = tb_dp_cap_get_lanes(dp_cap); + +- tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", +- rate, lanes, tb_dp_bandwidth(rate, lanes)); ++ tb_tunnel_dbg(tunnel, ++ "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", ++ rate, lanes, tb_dp_bandwidth(rate, lanes)); + + out = tunnel->dst_port; + +@@ -1265,8 +1307,9 @@ static void tb_dp_dump(struct tb_tunnel *tunnel) + rate = tb_dp_cap_get_rate(dp_cap); + lanes = tb_dp_cap_get_lanes(dp_cap); + +- tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", +- rate, lanes, tb_dp_bandwidth(rate, lanes)); ++ tb_tunnel_dbg(tunnel, ++ "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", ++ rate, lanes, tb_dp_bandwidth(rate, lanes)); + + if (tb_port_read(in, &dp_cap, TB_CFG_PORT, + in->cap_adap + DP_REMOTE_CAP, 1)) +@@ -1275,8 +1318,8 @@ static void tb_dp_dump(struct tb_tunnel *tunnel) + rate = tb_dp_cap_get_rate(dp_cap); + lanes = tb_dp_cap_get_lanes(dp_cap); + +- tb_port_dbg(in, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n", +- rate, lanes, tb_dp_bandwidth(rate, lanes)); ++ tb_tunnel_dbg(tunnel, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n", ++ rate, lanes, tb_dp_bandwidth(rate, lanes)); + } + + /** +@@ -1497,8 +1540,8 @@ static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits) + path->ingress_fc_enable = TB_PATH_ALL; + path->egress_shared_buffer = 
TB_PATH_NONE; + path->ingress_shared_buffer = TB_PATH_NONE; +- path->priority = 5; +- path->weight = 1; ++ path->priority = TB_DMA_PRIORITY; ++ path->weight = TB_DMA_WEIGHT; + path->clear_fc = true; + + /* +@@ -1531,8 +1574,8 @@ static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits) + path->ingress_fc_enable = TB_PATH_ALL; + path->egress_shared_buffer = TB_PATH_NONE; + path->ingress_shared_buffer = TB_PATH_NONE; +- path->priority = 5; +- path->weight = 1; ++ path->priority = TB_DMA_PRIORITY; ++ path->weight = TB_DMA_WEIGHT; + path->clear_fc = true; + + tb_path_for_each_hop(path, hop) { +@@ -1758,14 +1801,23 @@ static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate) + static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel, + int *consumed_up, int *consumed_down) + { +- int pcie_enabled = tb_acpi_may_tunnel_pcie(); ++ struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw); ++ int pcie_weight = tb_acpi_may_tunnel_pcie() ? TB_PCI_WEIGHT : 0; + + /* + * PCIe tunneling, if enabled, affects the USB3 bandwidth so + * take that it into account here. + */ +- *consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3; +- *consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3; ++ *consumed_up = tunnel->allocated_up * ++ (TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT; ++ *consumed_down = tunnel->allocated_down * ++ (TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT; ++ ++ if (tb_port_get_link_generation(port) >= 4) { ++ *consumed_up = max(*consumed_up, USB4_V2_USB3_MIN_BANDWIDTH); ++ *consumed_down = max(*consumed_down, USB4_V2_USB3_MIN_BANDWIDTH); ++ } ++ + return 0; + } + +@@ -1871,8 +1923,8 @@ static void tb_usb3_init_path(struct tb_path *path) + path->egress_shared_buffer = TB_PATH_NONE; + path->ingress_fc_enable = TB_PATH_ALL; + path->ingress_shared_buffer = TB_PATH_NONE; +- path->priority = 3; +- path->weight = 3; ++ path->priority = TB_USB3_PRIORITY; ++ path->weight = TB_USB3_WEIGHT; + path->drop_packages = 0; + + tb_path_for_each_hop(path, hop) +@@ -2387,3 +2439,8 @@ void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel, + tunnel->reclaim_available_bandwidth(tunnel, available_up, + available_down); + } ++ ++const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel) ++{ ++ return tb_tunnel_names[tunnel->type]; ++} +diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h +index bf690f7beeeeba..b4cff5482112d3 100644 +--- a/drivers/thunderbolt/tunnel.h ++++ b/drivers/thunderbolt/tunnel.h +@@ -80,6 +80,8 @@ struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down, + bool alloc_hopid); + struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up, + struct tb_port *down); ++bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up, ++ int *reserved_down); + struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in, + bool alloc_hopid); + struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in, +@@ -137,5 +139,27 @@ static inline bool tb_tunnel_is_usb3(const struct tb_tunnel *tunnel) + return tunnel->type == TB_TUNNEL_USB3; + } + +-#endif ++const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel); ++ ++#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) 
\
++	do {							\
++		struct tb_tunnel *__tunnel = (tunnel);		\
++		level(__tunnel->tb, "%llx:%u <-> %llx:%u (%s): " fmt, \
++		      tb_route(__tunnel->src_port->sw),		\
++		      __tunnel->src_port->port,			\
++		      tb_route(__tunnel->dst_port->sw),		\
++		      __tunnel->dst_port->port,			\
++		      tb_tunnel_type_name(__tunnel),		\
++		      ## arg);					\
++	} while (0)
++
++#define tb_tunnel_WARN(tunnel, fmt, arg...) \
++	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
++#define tb_tunnel_warn(tunnel, fmt, arg...) \
++	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
++#define tb_tunnel_info(tunnel, fmt, arg...) \
++	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
++#define tb_tunnel_dbg(tunnel, fmt, arg...) \
++	__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)
++
++#endif
+diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
+index 3aa32d7f9f6a1d..8db9bd32f47384 100644
+--- a/drivers/thunderbolt/usb4.c
++++ b/drivers/thunderbolt/usb4.c
+@@ -1494,6 +1494,112 @@ bool usb4_port_clx_supported(struct tb_port *port)
+ 	return !!(val & PORT_CS_18_CPS);
+ }
+ 
++/**
++ * usb4_port_asym_supported() - If the port supports asymmetric link
++ * @port: USB4 port
++ *
++ * Checks if the port and the cable support asymmetric link and returns
++ * %true in that case.
++ */
++bool usb4_port_asym_supported(struct tb_port *port)
++{
++	u32 val;
++
++	if (!port->cap_usb4)
++		return false;
++
++	if (tb_port_read(port, &val, TB_CFG_PORT, port->cap_usb4 + PORT_CS_18, 1))
++		return false;
++
++	return !!(val & PORT_CS_18_CSA);
++}
++
++/**
++ * usb4_port_asym_set_link_width() - Set link width to asymmetric or symmetric
++ * @port: USB4 port
++ * @width: Asymmetric width to configure
++ *
++ * Sets USB4 port link width to @width. Can be called for widths where
++ * usb4_port_asym_width_supported() returned @true.
++ */
++int usb4_port_asym_set_link_width(struct tb_port *port, enum tb_link_width width)
++{
++	u32 val;
++	int ret;
++
++	if (!port->cap_phy)
++		return -EINVAL;
++
++	ret = tb_port_read(port, &val, TB_CFG_PORT,
++			   port->cap_phy + LANE_ADP_CS_1, 1);
++	if (ret)
++		return ret;
++
++	val &= ~LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK;
++	switch (width) {
++	case TB_LINK_WIDTH_DUAL:
++		val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK,
++				  LANE_ADP_CS_1_TARGET_WIDTH_ASYM_DUAL);
++		break;
++	case TB_LINK_WIDTH_ASYM_TX:
++		val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK,
++				  LANE_ADP_CS_1_TARGET_WIDTH_ASYM_TX);
++		break;
++	case TB_LINK_WIDTH_ASYM_RX:
++		val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK,
++				  LANE_ADP_CS_1_TARGET_WIDTH_ASYM_RX);
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	return tb_port_write(port, &val, TB_CFG_PORT,
++			     port->cap_phy + LANE_ADP_CS_1, 1);
++}
++
++/**
++ * usb4_port_asym_start() - Start symmetry change and wait for completion
++ * @port: USB4 port
++ *
++ * Start symmetry change of the link to asymmetric or symmetric
++ * (according to what was previously set in tb_port_set_link_width()).
++ * Wait for completion of the change.
++ *
++ * Returns %0 in case of success, %-ETIMEDOUT in case of timeout or
++ * a negative errno in case of a failure.
++ */
++int usb4_port_asym_start(struct tb_port *port)
++{
++	int ret;
++	u32 val;
++
++	ret = tb_port_read(port, &val, TB_CFG_PORT,
++			   port->cap_usb4 + PORT_CS_19, 1);
++	if (ret)
++		return ret;
++
++	val &= ~PORT_CS_19_START_ASYM;
++	val |= FIELD_PREP(PORT_CS_19_START_ASYM, 1);
++
++	ret = tb_port_write(port, &val, TB_CFG_PORT,
++			    port->cap_usb4 + PORT_CS_19, 1);
++	if (ret)
++		return ret;
++
++	/*
++	 * Wait for PORT_CS_19_START_ASYM to be 0. 
This means the USB4
++	 * port started the symmetry transition.
++	 */
++	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_19,
++				     PORT_CS_19_START_ASYM, 0, 1000);
++	if (ret)
++		return ret;
++
++	/* Then wait for the transition to be completed */
++	return usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_18,
++				      PORT_CS_18_TIP, 0, 5000);
++}
++
+ /**
+  * usb4_port_margining_caps() - Read USB4 port marginig capabilities
+  * @port: USB4 port
+@@ -2274,13 +2380,13 @@ int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
+ 		goto err_request;
+ 
+ 	/*
+-	 * Always keep 1000 Mb/s to make sure xHCI has at least some
++	 * Always keep 900 Mb/s to make sure xHCI has at least some
+ 	 * bandwidth available for isochronous traffic.
+ 	 */
+-	if (consumed_up < 1000)
+-		consumed_up = 1000;
+-	if (consumed_down < 1000)
+-		consumed_down = 1000;
++	if (consumed_up < 900)
++		consumed_up = 900;
++	if (consumed_down < 900)
++		consumed_down = 900;
+ 
+ 	ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up,
+ 						       consumed_down);
+diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
+index 8f472a2080ffa0..4caecc3525bfd1 100644
+--- a/drivers/tty/serial/8250/8250_omap.c
++++ b/drivers/tty/serial/8250/8250_omap.c
+@@ -1567,7 +1567,7 @@ static int omap8250_probe(struct platform_device *pdev)
+ 	ret = devm_request_irq(&pdev->dev, irq, omap8250_irq, 0,
+ 			       dev_name(&pdev->dev), priv);
+ 	if (ret < 0)
+-		return ret;
++		goto err;
+ 
+ 	priv->wakeirq = irq_of_parse_and_map(np, 1);
+ 
+diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
+index 2e1b1c827dfe60..f798ef3c41309e 100644
+--- a/drivers/tty/serial/qcom_geni_serial.c
++++ b/drivers/tty/serial/qcom_geni_serial.c
+@@ -124,7 +124,7 @@ struct qcom_geni_serial_port {
+ 	dma_addr_t tx_dma_addr;
+ 	dma_addr_t rx_dma_addr;
+ 	bool setup;
+-	unsigned int baud;
++	unsigned long poll_timeout_us;
+ 	unsigned long clk_rate;
+ 	void *rx_buf;
+ 	u32 loopback;
+@@ -270,22 +270,13 @@ static bool qcom_geni_serial_poll_bit(struct uart_port *uport,
+ {
+ 	u32 reg;
+ 	struct qcom_geni_serial_port *port;
+-	unsigned int baud;
+-	unsigned int fifo_bits;
+ 	unsigned long timeout_us = 20000;
+ 	struct qcom_geni_private_data *private_data = uport->private_data;
+ 
+ 	if (private_data->drv) {
+ 		port = to_dev_port(uport);
+-		baud = port->baud;
+-		if (!baud)
+-			baud = 115200;
+-		fifo_bits = port->tx_fifo_depth * port->tx_fifo_width;
+-		/*
+-		 * Total polling iterations based on FIFO worth of bytes to be
+-		 * sent at current baud. Add a little fluff to the wait.
+-		 */
+-		timeout_us = ((fifo_bits * USEC_PER_SEC) / baud) + 500;
++		if (port->poll_timeout_us)
++			timeout_us = port->poll_timeout_us;
+ 	}
+ 
+ 	/*
+@@ -1223,11 +1214,11 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
+ 	unsigned long clk_rate;
+ 	u32 ver, sampling_rate;
+ 	unsigned int avg_bw_core;
++	unsigned long timeout;
+ 
+ 	qcom_geni_serial_stop_rx(uport);
+ 	/* baud rate */
+ 	baud = uart_get_baud_rate(uport, termios, old, 300, 4000000);
+-	port->baud = baud;
+ 
+ 	sampling_rate = UART_OVERSAMPLING;
+ 	/* Sampling rate is halved for IP versions >= 2.5 */
+@@ -1305,9 +1296,21 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
+ 	else
+ 		tx_trans_cfg |= UART_CTS_MASK;
+ 
+-	if (baud)
++	if (baud) {
+ 		uart_update_timeout(uport, termios->c_cflag, baud);
+ 
++		/*
++		 * Make sure that qcom_geni_serial_poll_bitfield() waits for
++		 * the FIFO, two-word intermediate transfer register and shift
++		 * register to clear. 
++ * ++ * Note that uart_fifo_timeout() also adds a 20 ms margin. ++ */ ++ timeout = jiffies_to_usecs(uart_fifo_timeout(uport)); ++ timeout += 3 * timeout / port->tx_fifo_depth; ++ WRITE_ONCE(port->poll_timeout_us, timeout); ++ } ++ + if (!uart_console(uport)) + writel(port->loopback, + uport->membase + SE_UART_LOOPBACK_CFG); +diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c +index de220ac8ca5494..5a1de6044b38cc 100644 +--- a/drivers/tty/serial/rp2.c ++++ b/drivers/tty/serial/rp2.c +@@ -578,8 +578,8 @@ static void rp2_reset_asic(struct rp2_card *card, unsigned int asic_id) + u32 clk_cfg; + + writew(1, base + RP2_GLOBAL_CMD); +- readw(base + RP2_GLOBAL_CMD); + msleep(100); ++ readw(base + RP2_GLOBAL_CMD); + writel(0, base + RP2_CLK_PRESCALER); + + /* TDM clock configuration */ +diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c +index ed8798fdf522ad..5d5570bebfeb61 100644 +--- a/drivers/tty/serial/serial_core.c ++++ b/drivers/tty/serial/serial_core.c +@@ -2694,13 +2694,13 @@ static int uart_poll_init(struct tty_driver *driver, int line, char *options) + int ret = 0; + + tport = &state->port; +- mutex_lock(&tport->mutex); ++ ++ guard(mutex)(&tport->mutex); + + port = uart_port_check(state); +- if (!port || !(port->ops->poll_get_char && port->ops->poll_put_char)) { +- ret = -1; +- goto out; +- } ++ if (!port || port->type == PORT_UNKNOWN || ++ !(port->ops->poll_get_char && port->ops->poll_put_char)) ++ return -1; + + pm_state = state->pm_state; + uart_change_pm(state, UART_PM_STATE_ON); +@@ -2720,10 +2720,10 @@ static int uart_poll_init(struct tty_driver *driver, int line, char *options) + ret = uart_set_options(port, NULL, baud, parity, bits, flow); + console_list_unlock(); + } +-out: ++ + if (ret) + uart_change_pm(state, pm_state); +- mutex_unlock(&tport->mutex); ++ + return ret; + } + +diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c +index 922ae1d76d909b..643157a92c62a7 100644 +--- a/drivers/ufs/host/ufs-qcom.c ++++ b/drivers/ufs/host/ufs-qcom.c +@@ -93,7 +93,7 @@ static const struct __ufs_qcom_bw_table { + [MODE_HS_RB][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 }, + [MODE_HS_RB][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 }, + [MODE_HS_RB][UFS_HS_G5][UFS_LANE_2] = { 5836800, 819200 }, +- [MODE_MAX][0][0] = { 7643136, 307200 }, ++ [MODE_MAX][0][0] = { 7643136, 819200 }, + }; + + static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS]; +diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c +index e0e97a1386662d..1d18d5002ef01d 100644 +--- a/drivers/usb/cdns3/cdnsp-ring.c ++++ b/drivers/usb/cdns3/cdnsp-ring.c +@@ -718,7 +718,8 @@ int cdnsp_remove_request(struct cdnsp_device *pdev, + seg = cdnsp_trb_in_td(pdev, cur_td->start_seg, cur_td->first_trb, + cur_td->last_trb, hw_deq); + +- if (seg && (pep->ep_state & EP_ENABLED)) ++ if (seg && (pep->ep_state & EP_ENABLED) && ++ !(pep->ep_state & EP_DIS_IN_RROGRESS)) + cdnsp_find_new_dequeue_state(pdev, pep, preq->request.stream_id, + cur_td, &deq_state); + else +@@ -736,7 +737,8 @@ int cdnsp_remove_request(struct cdnsp_device *pdev, + * During disconnecting all endpoint will be disabled so we don't + * have to worry about updating dequeue pointer. 
+ */ +- if (pdev->cdnsp_state & CDNSP_STATE_DISCONNECT_PENDING) { ++ if (pdev->cdnsp_state & CDNSP_STATE_DISCONNECT_PENDING || ++ pep->ep_state & EP_DIS_IN_RROGRESS) { + status = -ESHUTDOWN; + ret = cdnsp_cmd_set_deq(pdev, pep, &deq_state); + } +diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c +index ceca4d839dfd42..7ba760ee62e331 100644 +--- a/drivers/usb/cdns3/host.c ++++ b/drivers/usb/cdns3/host.c +@@ -62,7 +62,9 @@ static const struct xhci_plat_priv xhci_plat_cdns3_xhci = { + .resume_quirk = xhci_cdns3_resume_quirk, + }; + +-static const struct xhci_plat_priv xhci_plat_cdnsp_xhci; ++static const struct xhci_plat_priv xhci_plat_cdnsp_xhci = { ++ .quirks = XHCI_CDNS_SCTX_QUIRK, ++}; + + static int __cdns_host_init(struct cdns *cdns) + { +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c +index 0c1b69d944ca45..605fea4611029b 100644 +--- a/drivers/usb/class/cdc-acm.c ++++ b/drivers/usb/class/cdc-acm.c +@@ -962,10 +962,12 @@ static int get_serial_info(struct tty_struct *tty, struct serial_struct *ss) + struct acm *acm = tty->driver_data; + + ss->line = acm->minor; ++ mutex_lock(&acm->port.mutex); + ss->close_delay = jiffies_to_msecs(acm->port.close_delay) / 10; + ss->closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ? + ASYNC_CLOSING_WAIT_NONE : + jiffies_to_msecs(acm->port.closing_wait) / 10; ++ mutex_unlock(&acm->port.mutex); + return 0; + } + +diff --git a/drivers/usb/dwc2/drd.c b/drivers/usb/dwc2/drd.c +index a8605b02115b1c..1ad8fa3f862a15 100644 +--- a/drivers/usb/dwc2/drd.c ++++ b/drivers/usb/dwc2/drd.c +@@ -127,6 +127,15 @@ static int dwc2_drd_role_sw_set(struct usb_role_switch *sw, enum usb_role role) + role = USB_ROLE_DEVICE; + } + ++ if ((IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || ++ IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)) && ++ dwc2_is_device_mode(hsotg) && ++ hsotg->lx_state == DWC2_L2 && ++ hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE && ++ hsotg->bus_suspended && ++ !hsotg->params.no_clock_gating) ++ dwc2_gadget_exit_clock_gating(hsotg, 0); ++ + if (role == USB_ROLE_HOST) { + already = dwc2_ovr_avalid(hsotg, true); + } else if (role == USB_ROLE_DEVICE) { +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c +index b1e3fa54c63977..54c47463c215c2 100644 +--- a/drivers/usb/host/xhci-mem.c ++++ b/drivers/usb/host/xhci-mem.c +@@ -2289,7 +2289,10 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir, + erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base); + erst_base &= ERST_BASE_RSVDP; + erst_base |= ir->erst.erst_dma_addr & ~ERST_BASE_RSVDP; +- xhci_write_64(xhci, erst_base, &ir->ir_set->erst_base); ++ if (xhci->quirks & XHCI_WRITE_64_HI_LO) ++ hi_lo_writeq(erst_base, &ir->ir_set->erst_base); ++ else ++ xhci_write_64(xhci, erst_base, &ir->ir_set->erst_base); + + /* Set the event ring dequeue address of this interrupter */ + xhci_set_hc_event_deq(xhci, ir); +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c +index 6cee705568c276..dd02b064ecd4bc 100644 +--- a/drivers/usb/host/xhci-pci.c ++++ b/drivers/usb/host/xhci-pci.c +@@ -75,6 +75,9 @@ + #define PCI_DEVICE_ID_ASMEDIA_2142_XHCI 0x2142 + #define PCI_DEVICE_ID_ASMEDIA_3242_XHCI 0x3242 + ++#define PCI_DEVICE_ID_CADENCE 0x17CD ++#define PCI_DEVICE_ID_CADENCE_SSP 0x0200 ++ + static const char hcd_name[] = "xhci_hcd"; + + static struct hc_driver __read_mostly xhci_pci_hc_driver; +@@ -539,6 +542,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) + xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH; + } 
+ ++ if (pdev->vendor == PCI_DEVICE_ID_CADENCE && ++ pdev->device == PCI_DEVICE_ID_CADENCE_SSP) ++ xhci->quirks |= XHCI_CDNS_SCTX_QUIRK; ++ + /* xHC spec requires PCI devices to support D3hot and D3cold */ + if (xhci->hci_version >= 0x120) + xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW; +@@ -721,8 +728,10 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) + static void xhci_pci_remove(struct pci_dev *dev) + { + struct xhci_hcd *xhci; ++ bool set_power_d3; + + xhci = hcd_to_xhci(pci_get_drvdata(dev)); ++ set_power_d3 = xhci->quirks & XHCI_SPURIOUS_WAKEUP; + + xhci->xhc_state |= XHCI_STATE_REMOVING; + +@@ -735,11 +744,11 @@ static void xhci_pci_remove(struct pci_dev *dev) + xhci->shared_hcd = NULL; + } + ++ usb_hcd_pci_remove(dev); ++ + /* Workaround for spurious wakeups at shutdown with HSW */ +- if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) ++ if (set_power_d3) + pci_set_power_state(dev, PCI_D3hot); +- +- usb_hcd_pci_remove(dev); + } + + /* +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index 8dd85221cd9275..95d8cf24b01546 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -1414,6 +1414,20 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, + struct xhci_stream_ctx *ctx = + &ep->stream_info->stream_ctx_array[stream_id]; + deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK; ++ ++ /* ++ * Cadence xHCI controllers store some endpoint state ++ * information within Rsvd0 fields of Stream Endpoint ++ * context. This field is not cleared during Set TR ++ * Dequeue Pointer command which causes XDMA to skip ++ * over transfer ring and leads to data loss on stream ++ * pipe. ++ * To fix this issue driver must clear Rsvd0 field. ++ */ ++ if (xhci->quirks & XHCI_CDNS_SCTX_QUIRK) { ++ ctx->reserved[0] = 0; ++ ctx->reserved[1] = 0; ++ } + } else { + deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK; + } +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h +index b29fe4716f34e4..f5efee06ff0674 100644 +--- a/drivers/usb/host/xhci.h ++++ b/drivers/usb/host/xhci.h +@@ -17,6 +17,7 @@ + #include + #include + #include ++#include + + /* Code sharing between pci-quirks and xhci hcd */ + #include "xhci-ext-caps.h" +@@ -1913,6 +1914,8 @@ struct xhci_hcd { + #define XHCI_RESET_TO_DEFAULT BIT_ULL(44) + #define XHCI_ZHAOXIN_TRB_FETCH BIT_ULL(45) + #define XHCI_ZHAOXIN_HOST BIT_ULL(46) ++#define XHCI_WRITE_64_HI_LO BIT_ULL(47) ++#define XHCI_CDNS_SCTX_QUIRK BIT_ULL(48) + + unsigned int num_active_eps; + unsigned int limit_active_eps; +diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c +index c8098e9b432e13..62b5a30edc4267 100644 +--- a/drivers/usb/misc/appledisplay.c ++++ b/drivers/usb/misc/appledisplay.c +@@ -107,7 +107,12 @@ static void appledisplay_complete(struct urb *urb) + case ACD_BTN_BRIGHT_UP: + case ACD_BTN_BRIGHT_DOWN: + pdata->button_pressed = 1; +- schedule_delayed_work(&pdata->work, 0); ++ /* ++ * there is a window during which no device ++ * is registered ++ */ ++ if (pdata->bd ) ++ schedule_delayed_work(&pdata->work, 0); + break; + case ACD_BTN_NONE: + default: +@@ -202,6 +207,7 @@ static int appledisplay_probe(struct usb_interface *iface, + const struct usb_device_id *id) + { + struct backlight_properties props; ++ struct backlight_device *backlight; + struct appledisplay *pdata; + struct usb_device *udev = interface_to_usbdev(iface); + struct usb_endpoint_descriptor *endpoint; +@@ -272,13 +278,14 @@ static int appledisplay_probe(struct usb_interface 
*iface, + memset(&props, 0, sizeof(struct backlight_properties)); + props.type = BACKLIGHT_RAW; + props.max_brightness = 0xff; +- pdata->bd = backlight_device_register(bl_name, NULL, pdata, ++ backlight = backlight_device_register(bl_name, NULL, pdata, + &appledisplay_bl_data, &props); +- if (IS_ERR(pdata->bd)) { ++ if (IS_ERR(backlight)) { + dev_err(&iface->dev, "Backlight registration failed\n"); +- retval = PTR_ERR(pdata->bd); ++ retval = PTR_ERR(backlight); + goto error; + } ++ pdata->bd = backlight; + + /* Try to get brightness */ + brightness = appledisplay_bl_get_brightness(pdata->bd); +diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c +index cecd7693b7413c..75f5a740cba397 100644 +--- a/drivers/usb/misc/cypress_cy7c63.c ++++ b/drivers/usb/misc/cypress_cy7c63.c +@@ -88,6 +88,9 @@ static int vendor_command(struct cypress *dev, unsigned char request, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER, + address, data, iobuf, CYPRESS_MAX_REQSIZE, + USB_CTRL_GET_TIMEOUT); ++ /* we must not process garbage */ ++ if (retval < 2) ++ goto err_buf; + + /* store returned data (more READs to be added) */ + switch (request) { +@@ -107,6 +110,7 @@ static int vendor_command(struct cypress *dev, unsigned char request, + break; + } + ++err_buf: + kfree(iobuf); + error: + return retval; +diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c +index c640f98d20c548..6adaaf66c14d7b 100644 +--- a/drivers/usb/misc/yurex.c ++++ b/drivers/usb/misc/yurex.c +@@ -34,6 +34,8 @@ + #define YUREX_BUF_SIZE 8 + #define YUREX_WRITE_TIMEOUT (HZ*2) + ++#define MAX_S64_STRLEN 20 /* {-}922337203685477580{7,8} */ ++ + /* table of devices that work with this driver */ + static struct usb_device_id yurex_table[] = { + { USB_DEVICE(YUREX_VENDOR_ID, YUREX_PRODUCT_ID) }, +@@ -401,8 +403,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count, + { + struct usb_yurex *dev; + int len = 0; +- char in_buffer[20]; +- unsigned long flags; ++ char in_buffer[MAX_S64_STRLEN]; + + dev = file->private_data; + +@@ -412,13 +413,15 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count, + return -ENODEV; + } + +- spin_lock_irqsave(&dev->lock, flags); +- len = snprintf(in_buffer, 20, "%lld\n", dev->bbu); +- spin_unlock_irqrestore(&dev->lock, flags); +- mutex_unlock(&dev->io_mutex); +- +- if (WARN_ON_ONCE(len >= sizeof(in_buffer))) ++ if (WARN_ON_ONCE(dev->bbu > S64_MAX || dev->bbu < S64_MIN)) { ++ mutex_unlock(&dev->io_mutex); + return -EIO; ++ } ++ ++ spin_lock_irq(&dev->lock); ++ scnprintf(in_buffer, MAX_S64_STRLEN, "%lld\n", dev->bbu); ++ spin_unlock_irq(&dev->lock); ++ mutex_unlock(&dev->io_mutex); + + return simple_read_from_buffer(buffer, count, ppos, in_buffer, len); + } +@@ -507,8 +510,11 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer, + __func__, retval); + goto error; + } +- if (set && timeout) ++ if (set && timeout) { ++ spin_lock_irq(&dev->lock); + dev->bbu = c2; ++ spin_unlock_irq(&dev->lock); ++ } + return timeout ? 
count : -EIO; + + error: +diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c +index da2c31ccc13808..c29a195a0175c0 100644 +--- a/drivers/vhost/vdpa.c ++++ b/drivers/vhost/vdpa.c +@@ -191,11 +191,9 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid) + if (irq < 0) + return; + +- irq_bypass_unregister_producer(&vq->call_ctx.producer); + if (!vq->call_ctx.ctx) + return; + +- vq->call_ctx.producer.token = vq->call_ctx.ctx; + vq->call_ctx.producer.irq = irq; + ret = irq_bypass_register_producer(&vq->call_ctx.producer); + if (unlikely(ret)) +@@ -627,6 +625,14 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, + vq->last_avail_idx = vq_state.split.avail_index; + } + break; ++ case VHOST_SET_VRING_CALL: ++ if (vq->call_ctx.ctx) { ++ if (ops->get_status(vdpa) & ++ VIRTIO_CONFIG_S_DRIVER_OK) ++ vhost_vdpa_unsetup_vq_irq(v, idx); ++ vq->call_ctx.producer.token = NULL; ++ } ++ break; + } + + r = vhost_vring_ioctl(&v->vdev, cmd, argp); +@@ -659,13 +665,16 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, + cb.callback = vhost_vdpa_virtqueue_cb; + cb.private = vq; + cb.trigger = vq->call_ctx.ctx; ++ vq->call_ctx.producer.token = vq->call_ctx.ctx; ++ if (ops->get_status(vdpa) & ++ VIRTIO_CONFIG_S_DRIVER_OK) ++ vhost_vdpa_setup_vq_irq(v, idx); + } else { + cb.callback = NULL; + cb.private = NULL; + cb.trigger = NULL; + } + ops->set_vq_cb(vdpa, idx, &cb); +- vhost_vdpa_setup_vq_irq(v, idx); + break; + + case VHOST_SET_VRING_NUM: +@@ -1316,6 +1325,7 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep) + for (i = 0; i < nvqs; i++) { + vqs[i] = &v->vqs[i]; + vqs[i]->handle_kick = handle_vq_kick; ++ vqs[i]->call_ctx.ctx = NULL; + } + vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false, + vhost_vdpa_process_iotlb_msg); +diff --git a/drivers/video/fbdev/hpfb.c b/drivers/video/fbdev/hpfb.c +index 406c1383cbda91..1461a909e17ed2 100644 +--- a/drivers/video/fbdev/hpfb.c ++++ b/drivers/video/fbdev/hpfb.c +@@ -343,6 +343,7 @@ static int hpfb_dio_probe(struct dio_dev *d, const struct dio_device_id *ent) + if (hpfb_init_one(paddr, vaddr)) { + if (d->scode >= DIOII_SCBASE) + iounmap((void *)vaddr); ++ release_mem_region(d->resource.start, resource_size(&d->resource)); + return -ENOMEM; + } + return 0; +diff --git a/drivers/watchdog/imx_sc_wdt.c b/drivers/watchdog/imx_sc_wdt.c +index 8ac021748d160d..79649b0e89e473 100644 +--- a/drivers/watchdog/imx_sc_wdt.c ++++ b/drivers/watchdog/imx_sc_wdt.c +@@ -213,29 +213,6 @@ static int imx_sc_wdt_probe(struct platform_device *pdev) + return devm_watchdog_register_device(dev, wdog); + } + +-static int __maybe_unused imx_sc_wdt_suspend(struct device *dev) +-{ +- struct imx_sc_wdt_device *imx_sc_wdd = dev_get_drvdata(dev); +- +- if (watchdog_active(&imx_sc_wdd->wdd)) +- imx_sc_wdt_stop(&imx_sc_wdd->wdd); +- +- return 0; +-} +- +-static int __maybe_unused imx_sc_wdt_resume(struct device *dev) +-{ +- struct imx_sc_wdt_device *imx_sc_wdd = dev_get_drvdata(dev); +- +- if (watchdog_active(&imx_sc_wdd->wdd)) +- imx_sc_wdt_start(&imx_sc_wdd->wdd); +- +- return 0; +-} +- +-static SIMPLE_DEV_PM_OPS(imx_sc_wdt_pm_ops, +- imx_sc_wdt_suspend, imx_sc_wdt_resume); +- + static const struct of_device_id imx_sc_wdt_dt_ids[] = { + { .compatible = "fsl,imx-sc-wdt", }, + { /* sentinel */ } +@@ -247,7 +224,6 @@ static struct platform_driver imx_sc_wdt_driver = { + .driver = { + .name = "imx-sc-wdt", + .of_match_table = imx_sc_wdt_dt_ids, +- .pm = &imx_sc_wdt_pm_ops, + }, + }; + 
module_platform_driver(imx_sc_wdt_driver); +diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c +index 0e6c6c25d154f5..6d0d1c8a508bf9 100644 +--- a/drivers/xen/swiotlb-xen.c ++++ b/drivers/xen/swiotlb-xen.c +@@ -78,9 +78,15 @@ static inline int range_straddles_page_boundary(phys_addr_t p, size_t size) + { + unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p); + unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size); ++ phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT); + + next_bfn = pfn_to_bfn(xen_pfn); + ++ /* If buffer is physically aligned, ensure DMA alignment. */ ++ if (IS_ALIGNED(p, algn) && ++ !IS_ALIGNED((phys_addr_t)next_bfn << XEN_PAGE_SHIFT, algn)) ++ return 1; ++ + for (i = 1; i < nr_pages; i++) + if (pfn_to_bfn(++xen_pfn) != ++next_bfn) + return 1; +@@ -140,7 +146,7 @@ xen_swiotlb_alloc_coherent(struct device *dev, size_t size, + void *ret; + + /* Align the allocation to the Xen page size */ +- size = 1UL << (order + XEN_PAGE_SHIFT); ++ size = ALIGN(size, XEN_PAGE_SIZE); + + ret = (void *)__get_free_pages(flags, get_order(size)); + if (!ret) +@@ -172,7 +178,7 @@ xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr, + int order = get_order(size); + + /* Convert the size to actually allocated. */ +- size = 1UL << (order + XEN_PAGE_SHIFT); ++ size = ALIGN(size, XEN_PAGE_SIZE); + + if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) || + WARN_ON_ONCE(range_straddles_page_boundary(phys, size))) +diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h +index bda1fdbba666aa..ec6679a538c1dc 100644 +--- a/fs/btrfs/btrfs_inode.h ++++ b/fs/btrfs/btrfs_inode.h +@@ -82,8 +82,10 @@ struct btrfs_inode { + /* + * Lock for counters and all fields used to determine if the inode is in + * the log or not (last_trans, last_sub_trans, last_log_commit, +- * logged_trans), to access/update new_delalloc_bytes and to update the +- * VFS' inode number of bytes used. ++ * logged_trans), to access/update delalloc_bytes, new_delalloc_bytes, ++ * defrag_bytes, disk_i_size, outstanding_extents, csum_bytes and to ++ * update the VFS' inode number of bytes used. ++ * Also protects setting struct file::private_data. + */ + spinlock_t lock; + +@@ -102,6 +104,14 @@ struct btrfs_inode { + /* held while logging the inode in tree-log.c */ + struct mutex log_mutex; + ++ /* ++ * Counters to keep track of the number of extent item's we may use due ++ * to delalloc and such. outstanding_extents is the number of extent ++ * items we think we'll end up using, and reserved_extents is the number ++ * of extent items we've reserved metadata for. Protected by 'lock'. ++ */ ++ unsigned outstanding_extents; ++ + /* used to order data wrt metadata */ + struct btrfs_ordered_inode_tree ordered_tree; + +@@ -122,28 +132,31 @@ struct btrfs_inode { + u64 generation; + + /* +- * transid of the trans_handle that last modified this inode ++ * ID of the transaction handle that last modified this inode. ++ * Protected by 'lock'. + */ + u64 last_trans; + + /* +- * transid that last logged this inode ++ * ID of the transaction that last logged this inode. ++ * Protected by 'lock'. + */ + u64 logged_trans; + + /* +- * log transid when this inode was last modified ++ * Log transaction ID when this inode was last modified. ++ * Protected by 'lock'. + */ + int last_sub_trans; + +- /* a local copy of root's last_log_commit */ ++ /* A local copy of root's last_log_commit. Protected by 'lock'. 
*/ + int last_log_commit; + + union { + /* + * Total number of bytes pending delalloc, used by stat to + * calculate the real block usage of the file. This is used +- * only for files. ++ * only for files. Protected by 'lock'. + */ + u64 delalloc_bytes; + /* +@@ -161,7 +174,7 @@ struct btrfs_inode { + * Total number of bytes pending delalloc that fall within a file + * range that is either a hole or beyond EOF (and no prealloc extent + * exists in the range). This is always <= delalloc_bytes and this +- * is used only for files. ++ * is used only for files. Protected by 'lock'. + */ + u64 new_delalloc_bytes; + /* +@@ -172,15 +185,15 @@ struct btrfs_inode { + }; + + /* +- * total number of bytes pending defrag, used by stat to check whether +- * it needs COW. ++ * Total number of bytes pending defrag, used by stat to check whether ++ * it needs COW. Protected by 'lock'. + */ + u64 defrag_bytes; + + /* +- * the size of the file stored in the metadata on disk. data=ordered ++ * The size of the file stored in the metadata on disk. data=ordered + * means the in-memory i_size might be larger than the size on disk +- * because not all the blocks are written yet. ++ * because not all the blocks are written yet. Protected by 'lock'. + */ + u64 disk_i_size; + +@@ -214,7 +227,7 @@ struct btrfs_inode { + + /* + * Number of bytes outstanding that are going to need csums. This is +- * used in ENOSPC accounting. ++ * used in ENOSPC accounting. Protected by 'lock'. + */ + u64 csum_bytes; + +@@ -223,14 +236,6 @@ struct btrfs_inode { + /* Read-only compatibility flags, upper half of inode_item::flags */ + u32 ro_flags; + +- /* +- * Counters to keep track of the number of extent item's we may use due +- * to delalloc and such. outstanding_extents is the number of extent +- * items we think we'll end up using, and reserved_extents is the number +- * of extent items we've reserved metadata for. +- */ +- unsigned outstanding_extents; +- + struct btrfs_block_rsv block_rsv; + + /* +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h +index 06333a74d6c4cb..f7bb4c34b984b3 100644 +--- a/fs/btrfs/ctree.h ++++ b/fs/btrfs/ctree.h +@@ -445,6 +445,8 @@ struct btrfs_file_private { + void *filldir_buf; + u64 last_index; + struct extent_state *llseek_cached_state; ++ /* Task that allocated this structure. 
*/
++	struct task_struct *owner_task;
+ };
+ 
+ static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info)
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 72851adc1feeb5..50d795a2542c3b 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -6175,13 +6175,13 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
+ 			continue;
+ 
+ 		ret = btrfs_trim_free_extents(device, &group_trimmed);
++
++		trimmed += group_trimmed;
+ 		if (ret) {
+ 			dev_failed++;
+ 			dev_ret = ret;
+ 			break;
+ 		}
+-
+-		trimmed += group_trimmed;
+ 	}
+ 	mutex_unlock(&fs_devices->device_list_mutex);
+ 
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 15fd8c00f4c083..fc6c91773bc894 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -3481,7 +3481,7 @@ static bool find_desired_extent_in_hole(struct btrfs_inode *inode, int whence,
+ static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
+ {
+ 	struct btrfs_inode *inode = BTRFS_I(file->f_mapping->host);
+-	struct btrfs_file_private *private = file->private_data;
++	struct btrfs_file_private *private;
+ 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ 	struct extent_state *cached_state = NULL;
+ 	struct extent_state **delalloc_cached_state;
+@@ -3509,7 +3509,19 @@ static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
+ 	    inode_get_bytes(&inode->vfs_inode) == i_size)
+ 		return i_size;
+ 
+-	if (!private) {
++	spin_lock(&inode->lock);
++	private = file->private_data;
++	spin_unlock(&inode->lock);
++
++	if (private && private->owner_task != current) {
++		/*
++		 * Not allocated by us, don't use it as its cached state is used
++		 * by the task that allocated it and we want neither to mess
++		 * with it nor to get incorrect results because it reflects an
++		 * invalid state for the current task.
++		 */
++		private = NULL;
++	} else if (!private) {
+ 		private = kzalloc(sizeof(*private), GFP_KERNEL);
+ 		/*
+ 		 * No worries if memory allocation failed.
+@@ -3517,7 +3529,23 @@ static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
+ 		 * lseek SEEK_HOLE/DATA calls to a file when there's delalloc,
+ 		 * so everything will still be correct. 
+ */ +- file->private_data = private; ++ if (private) { ++ bool free = false; ++ ++ private->owner_task = current; ++ ++ spin_lock(&inode->lock); ++ if (file->private_data) ++ free = true; ++ else ++ file->private_data = private; ++ spin_unlock(&inode->lock); ++ ++ if (free) { ++ kfree(private); ++ private = NULL; ++ } ++ } + } + + if (private) +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c +index d5297523d49772..5f0c9c3f3bbf09 100644 +--- a/fs/btrfs/ioctl.c ++++ b/fs/btrfs/ioctl.c +@@ -533,13 +533,11 @@ static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info, + + range.minlen = max(range.minlen, minlen); + ret = btrfs_trim_fs(fs_info, &range); +- if (ret < 0) +- return ret; + + if (copy_to_user(arg, &range, sizeof(range))) + return -EFAULT; + +- return 0; ++ return ret; + } + + int __pure btrfs_is_empty_uuid(u8 *uuid) +diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c +index 1b999c6e419307..b98d42ca55647f 100644 +--- a/fs/btrfs/subpage.c ++++ b/fs/btrfs/subpage.c +@@ -713,8 +713,14 @@ void btrfs_page_unlock_writer(struct btrfs_fs_info *fs_info, struct page *page, + } + + #define GET_SUBPAGE_BITMAP(subpage, subpage_info, name, dst) \ +- bitmap_cut(dst, subpage->bitmaps, 0, \ +- subpage_info->name##_offset, subpage_info->bitmap_nr_bits) ++{ \ ++ const int bitmap_nr_bits = subpage_info->bitmap_nr_bits; \ ++ \ ++ ASSERT(bitmap_nr_bits < BITS_PER_LONG); \ ++ *dst = bitmap_read(subpage->bitmaps, \ ++ subpage_info->name##_offset, \ ++ bitmap_nr_bits); \ ++} + + void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info, + struct page *page, u64 start, u32 len) +diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c +index 46c1f749839569..3a8ec33a120490 100644 +--- a/fs/btrfs/tree-checker.c ++++ b/fs/btrfs/tree-checker.c +@@ -1493,7 +1493,7 @@ static int check_extent_item(struct extent_buffer *leaf, + dref_objectid > BTRFS_LAST_FREE_OBJECTID)) { + extent_err(leaf, slot, + "invalid data ref objectid value %llu", +- dref_root); ++ dref_objectid); + return -EUCLEAN; + } + if (unlikely(!IS_ALIGNED(dref_offset, +diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c +index 4dd8a993c60a8b..7c6f260a3be567 100644 +--- a/fs/cachefiles/xattr.c ++++ b/fs/cachefiles/xattr.c +@@ -64,9 +64,15 @@ int cachefiles_set_object_xattr(struct cachefiles_object *object) + memcpy(buf->data, fscache_get_aux(object->cookie), len); + + ret = cachefiles_inject_write_error(); +- if (ret == 0) +- ret = vfs_setxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache, +- buf, sizeof(struct cachefiles_xattr) + len, 0); ++ if (ret == 0) { ++ ret = mnt_want_write_file(file); ++ if (ret == 0) { ++ ret = vfs_setxattr(&nop_mnt_idmap, dentry, ++ cachefiles_xattr_cache, buf, ++ sizeof(struct cachefiles_xattr) + len, 0); ++ mnt_drop_write_file(file); ++ } ++ } + if (ret < 0) { + trace_cachefiles_vfs_error(object, file_inode(file), ret, + cachefiles_trace_setxattr_error); +@@ -151,8 +157,14 @@ int cachefiles_remove_object_xattr(struct cachefiles_cache *cache, + int ret; + + ret = cachefiles_inject_remove_error(); +- if (ret == 0) +- ret = vfs_removexattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache); ++ if (ret == 0) { ++ ret = mnt_want_write(cache->mnt); ++ if (ret == 0) { ++ ret = vfs_removexattr(&nop_mnt_idmap, dentry, ++ cachefiles_xattr_cache); ++ mnt_drop_write(cache->mnt); ++ } ++ } + if (ret < 0) { + trace_cachefiles_vfs_error(object, d_inode(dentry), ret, + cachefiles_trace_remxattr_error); +@@ -208,9 +220,15 @@ bool cachefiles_set_volume_xattr(struct cachefiles_volume *volume) + 
memcpy(buf->data, p, volume->vcookie->coherency_len); + + ret = cachefiles_inject_write_error(); +- if (ret == 0) +- ret = vfs_setxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache, +- buf, len, 0); ++ if (ret == 0) { ++ ret = mnt_want_write(volume->cache->mnt); ++ if (ret == 0) { ++ ret = vfs_setxattr(&nop_mnt_idmap, dentry, ++ cachefiles_xattr_cache, ++ buf, len, 0); ++ mnt_drop_write(volume->cache->mnt); ++ } ++ } + if (ret < 0) { + trace_cachefiles_vfs_error(NULL, d_inode(dentry), ret, + cachefiles_trace_setxattr_error); +diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c +index 6eae3f12ad503d..553af738bb3e1b 100644 +--- a/fs/crypto/fname.c ++++ b/fs/crypto/fname.c +@@ -74,13 +74,7 @@ struct fscrypt_nokey_name { + + static inline bool fscrypt_is_dot_dotdot(const struct qstr *str) + { +- if (str->len == 1 && str->name[0] == '.') +- return true; +- +- if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.') +- return true; +- +- return false; ++ return is_dot_dotdot(str->name, str->len); + } + + /** +diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c +index 03bd55069d8600..2fe0f3af1a08ec 100644 +--- a/fs/ecryptfs/crypto.c ++++ b/fs/ecryptfs/crypto.c +@@ -1949,16 +1949,6 @@ int ecryptfs_encrypt_and_encode_filename( + return rc; + } + +-static bool is_dot_dotdot(const char *name, size_t name_size) +-{ +- if (name_size == 1 && name[0] == '.') +- return true; +- else if (name_size == 2 && name[0] == '.' && name[1] == '.') +- return true; +- +- return false; +-} +- + /** + * ecryptfs_decode_and_decrypt_filename - converts the encoded cipher text name to decoded plaintext + * @plaintext_name: The plaintext name +diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c +index edc8ec7581b8f0..9e40bee3682f7d 100644 +--- a/fs/erofs/inode.c ++++ b/fs/erofs/inode.c +@@ -205,12 +205,14 @@ static int erofs_fill_symlink(struct inode *inode, void *kaddr, + unsigned int m_pofs) + { + struct erofs_inode *vi = EROFS_I(inode); +- unsigned int bsz = i_blocksize(inode); ++ loff_t off; + char *lnk; + +- /* if it cannot be handled with fast symlink scheme */ +- if (vi->datalayout != EROFS_INODE_FLAT_INLINE || +- inode->i_size >= bsz || inode->i_size < 0) { ++ m_pofs += vi->xattr_isize; ++ /* check if it cannot be handled with fast symlink scheme */ ++ if (vi->datalayout != EROFS_INODE_FLAT_INLINE || inode->i_size < 0 || ++ check_add_overflow(m_pofs, inode->i_size, &off) || ++ off > i_blocksize(inode)) { + inode->i_op = &erofs_symlink_iops; + return 0; + } +@@ -219,16 +221,6 @@ static int erofs_fill_symlink(struct inode *inode, void *kaddr, + if (!lnk) + return -ENOMEM; + +- m_pofs += vi->xattr_isize; +- /* inline symlink data shouldn't cross block boundary */ +- if (m_pofs + inode->i_size > bsz) { +- kfree(lnk); +- erofs_err(inode->i_sb, +- "inline data cross block boundary @ nid %llu", +- vi->nid); +- DBG_BUGON(1); +- return -EFSCORRUPTED; +- } + memcpy(lnk, kaddr + m_pofs, inode->i_size); + lnk[inode->i_size] = '\0'; + +diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c +index b65058d972f956..1a1e2214c581f3 100644 +--- a/fs/ext4/ialloc.c ++++ b/fs/ext4/ialloc.c +@@ -514,6 +514,8 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent, + if (min_inodes < 1) + min_inodes = 1; + min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4; ++ if (min_clusters < 0) ++ min_clusters = 0; + + /* + * Start looking in the flex group where we last allocated an +@@ -755,10 +757,10 @@ int ext4_mark_inode_used(struct super_block *sb, int ino) + struct ext4_group_desc *gdp; + ext4_group_t 
group; + int bit; +- int err = -EFSCORRUPTED; ++ int err; + + if (ino < EXT4_FIRST_INO(sb) || ino > max_ino) +- goto out; ++ return -EFSCORRUPTED; + + group = (ino - 1) / EXT4_INODES_PER_GROUP(sb); + bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb); +@@ -860,6 +862,7 @@ int ext4_mark_inode_used(struct super_block *sb, int ino) + err = ext4_handle_dirty_metadata(NULL, NULL, group_desc_bh); + sync_dirty_buffer(group_desc_bh); + out: ++ brelse(inode_bitmap_bh); + return err; + } + +@@ -1053,12 +1056,13 @@ struct inode *__ext4_new_inode(struct mnt_idmap *idmap, + brelse(inode_bitmap_bh); + inode_bitmap_bh = ext4_read_inode_bitmap(sb, group); + /* Skip groups with suspicious inode tables */ +- if (((!(sbi->s_mount_state & EXT4_FC_REPLAY)) +- && EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) || +- IS_ERR(inode_bitmap_bh)) { ++ if (IS_ERR(inode_bitmap_bh)) { + inode_bitmap_bh = NULL; + goto next_group; + } ++ if (!(sbi->s_mount_state & EXT4_FC_REPLAY) && ++ EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) ++ goto next_group; + + repeat_in_this_group: + ret2 = find_inode_bit(sb, group, inode_bitmap_bh, &ino); +diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c +index a604aa1d23aedd..cb65052ee3dec6 100644 +--- a/fs/ext4/inline.c ++++ b/fs/ext4/inline.c +@@ -1665,24 +1665,36 @@ struct buffer_head *ext4_find_inline_entry(struct inode *dir, + struct ext4_dir_entry_2 **res_dir, + int *has_inline_data) + { ++ struct ext4_xattr_ibody_find is = { ++ .s = { .not_found = -ENODATA, }, ++ }; ++ struct ext4_xattr_info i = { ++ .name_index = EXT4_XATTR_INDEX_SYSTEM, ++ .name = EXT4_XATTR_SYSTEM_DATA, ++ }; + int ret; +- struct ext4_iloc iloc; + void *inline_start; + int inline_size; + +- if (ext4_get_inode_loc(dir, &iloc)) +- return NULL; ++ ret = ext4_get_inode_loc(dir, &is.iloc); ++ if (ret) ++ return ERR_PTR(ret); + + down_read(&EXT4_I(dir)->xattr_sem); ++ ++ ret = ext4_xattr_ibody_find(dir, &i, &is); ++ if (ret) ++ goto out; ++ + if (!ext4_has_inline_data(dir)) { + *has_inline_data = 0; + goto out; + } + +- inline_start = (void *)ext4_raw_inode(&iloc)->i_block + ++ inline_start = (void *)ext4_raw_inode(&is.iloc)->i_block + + EXT4_INLINE_DOTDOT_SIZE; + inline_size = EXT4_MIN_INLINE_DATA_SIZE - EXT4_INLINE_DOTDOT_SIZE; +- ret = ext4_search_dir(iloc.bh, inline_start, inline_size, ++ ret = ext4_search_dir(is.iloc.bh, inline_start, inline_size, + dir, fname, 0, res_dir); + if (ret == 1) + goto out_find; +@@ -1692,20 +1704,23 @@ struct buffer_head *ext4_find_inline_entry(struct inode *dir, + if (ext4_get_inline_size(dir) == EXT4_MIN_INLINE_DATA_SIZE) + goto out; + +- inline_start = ext4_get_inline_xattr_pos(dir, &iloc); ++ inline_start = ext4_get_inline_xattr_pos(dir, &is.iloc); + inline_size = ext4_get_inline_size(dir) - EXT4_MIN_INLINE_DATA_SIZE; + +- ret = ext4_search_dir(iloc.bh, inline_start, inline_size, ++ ret = ext4_search_dir(is.iloc.bh, inline_start, inline_size, + dir, fname, 0, res_dir); + if (ret == 1) + goto out_find; + + out: +- brelse(iloc.bh); +- iloc.bh = NULL; ++ brelse(is.iloc.bh); ++ if (ret < 0) ++ is.iloc.bh = ERR_PTR(ret); ++ else ++ is.iloc.bh = NULL; + out_find: + up_read(&EXT4_I(dir)->xattr_sem); +- return iloc.bh; ++ return is.iloc.bh; + } + + int ext4_delete_inline_entry(handle_t *handle, +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c +index 870397f3de5595..87ba7f58216f70 100644 +--- a/fs/ext4/mballoc.c ++++ b/fs/ext4/mballoc.c +@@ -3886,11 +3886,8 @@ static void ext4_free_data_in_buddy(struct super_block *sb, + /* + * Clear the trimmed flag for the group so that the next + * ext4_trim_fs can trim it. 
+- * If the volume is mounted with -o discard, online discard +- * is supported and the free blocks will be trimmed online. + */ +- if (!test_opt(sb, DISCARD)) +- EXT4_MB_GRP_CLEAR_TRIMMED(db); ++ EXT4_MB_GRP_CLEAR_TRIMMED(db); + + if (!db->bb_free_root.rb_node) { + /* No more items in the per group rb tree +@@ -6586,8 +6583,9 @@ static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode, + " group:%u block:%d count:%lu failed" + " with %d", block_group, bit, count, + err); +- } else +- EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info); ++ } ++ ++ EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info); + + ext4_lock_group(sb, block_group); + mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index 5baacb3058abd0..46c4f750497918 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -5205,6 +5205,18 @@ static int ext4_block_group_meta_init(struct super_block *sb, int silent) + return 0; + } + ++/* ++ * It's hard to get stripe aligned blocks if stripe is not aligned with ++ * cluster, just disable stripe and alert user to simplify code and avoid ++ * stripe aligned allocation which will rarely succeed. ++ */ ++static bool ext4_is_stripe_incompatible(struct super_block *sb, unsigned long stripe) ++{ ++ struct ext4_sb_info *sbi = EXT4_SB(sb); ++ return (stripe > 0 && sbi->s_cluster_ratio > 1 && ++ stripe % sbi->s_cluster_ratio != 0); ++} ++ + static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb) + { + struct ext4_super_block *es = NULL; +@@ -5312,13 +5324,7 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb) + goto failed_mount3; + + sbi->s_stripe = ext4_get_stripe_size(sbi); +- /* +- * It's hard to get stripe aligned blocks if stripe is not aligned with +- * cluster, just disable stripe and alert user to simpfy code and avoid +- * stripe aligned allocation which will rarely successes. +- */ +- if (sbi->s_stripe > 0 && sbi->s_cluster_ratio > 1 && +- sbi->s_stripe % sbi->s_cluster_ratio != 0) { ++ if (ext4_is_stripe_incompatible(sb, sbi->s_stripe)) { + ext4_msg(sb, KERN_WARNING, + "stripe (%lu) is not aligned with cluster size (%u), " + "stripe is disabled", +@@ -6482,6 +6488,15 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb) + + } + ++ if ((ctx->spec & EXT4_SPEC_s_stripe) && ++ ext4_is_stripe_incompatible(sb, ctx->s_stripe)) { ++ ext4_msg(sb, KERN_WARNING, ++ "stripe (%lu) is not aligned with cluster size (%u), " ++ "stripe is disabled", ++ ctx->s_stripe, sbi->s_cluster_ratio); ++ ctx->s_stripe = 0; ++ } ++ + /* + * Changing the DIOREAD_NOLOCK or DELALLOC mount options may cause + * two calls to ext4_should_dioread_nolock() to return inconsistent +diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c +index c07fe6b840a091..f7ef69f44f3d84 100644 +--- a/fs/f2fs/compress.c ++++ b/fs/f2fs/compress.c +@@ -887,14 +887,15 @@ static bool cluster_has_invalid_data(struct compress_ctx *cc) + + bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) + { ++#ifdef CONFIG_F2FS_CHECK_FS + struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); + unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size; +- bool compressed = dn->data_blkaddr == COMPRESS_ADDR; + int cluster_end = 0; ++ unsigned int count; + int i; + char *reason = ""; + +- if (!compressed) ++ if (dn->data_blkaddr != COMPRESS_ADDR) + return false; + + /* [..., COMPR_ADDR, ...] 
*/ +@@ -903,7 +904,7 @@ bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) + goto out; + } + +- for (i = 1; i < cluster_size; i++) { ++ for (i = 1, count = 1; i < cluster_size; i++, count++) { + block_t blkaddr = data_blkaddr(dn->inode, dn->node_page, + dn->ofs_in_node + i); + +@@ -923,19 +924,42 @@ bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) + goto out; + } + } ++ ++ f2fs_bug_on(F2FS_I_SB(dn->inode), count != cluster_size && ++ !is_inode_flag_set(dn->inode, FI_COMPRESS_RELEASED)); ++ + return false; + out: + f2fs_warn(sbi, "access invalid cluster, ino:%lu, nid:%u, ofs_in_node:%u, reason:%s", + dn->inode->i_ino, dn->nid, dn->ofs_in_node, reason); + set_sbi_flag(sbi, SBI_NEED_FSCK); + return true; ++#else ++ return false; ++#endif + } + +-static int __f2fs_cluster_blocks(struct inode *inode, +- unsigned int cluster_idx, bool compr) ++static int __f2fs_get_cluster_blocks(struct inode *inode, ++ struct dnode_of_data *dn) + { +- struct dnode_of_data dn; + unsigned int cluster_size = F2FS_I(inode)->i_cluster_size; ++ int count, i; ++ ++ for (i = 0, count = 0; i < cluster_size; i++) { ++ block_t blkaddr = data_blkaddr(dn->inode, dn->node_page, ++ dn->ofs_in_node + i); ++ ++ if (__is_valid_data_blkaddr(blkaddr)) ++ count++; ++ } ++ ++ return count; ++} ++ ++static int __f2fs_cluster_blocks(struct inode *inode, unsigned int cluster_idx, ++ enum cluster_check_type type) ++{ ++ struct dnode_of_data dn; + unsigned int start_idx = cluster_idx << + F2FS_I(inode)->i_log_cluster_size; + int ret; +@@ -950,31 +974,16 @@ static int __f2fs_cluster_blocks(struct inode *inode, + + if (f2fs_sanity_check_cluster(&dn)) { + ret = -EFSCORRUPTED; +- f2fs_handle_error(F2FS_I_SB(inode), ERROR_CORRUPTED_CLUSTER); + goto fail; + } + + if (dn.data_blkaddr == COMPRESS_ADDR) { +- int i; +- +- ret = 1; +- for (i = 1; i < cluster_size; i++) { +- block_t blkaddr; +- +- blkaddr = data_blkaddr(dn.inode, +- dn.node_page, dn.ofs_in_node + i); +- if (compr) { +- if (__is_valid_data_blkaddr(blkaddr)) +- ret++; +- } else { +- if (blkaddr != NULL_ADDR) +- ret++; +- } +- } +- +- f2fs_bug_on(F2FS_I_SB(inode), +- !compr && ret != cluster_size && +- !is_inode_flag_set(inode, FI_COMPRESS_RELEASED)); ++ if (type == CLUSTER_COMPR_BLKS) ++ ret = 1 + __f2fs_get_cluster_blocks(inode, &dn); ++ else if (type == CLUSTER_IS_COMPR) ++ ret = 1; ++ } else if (type == CLUSTER_RAW_BLKS) { ++ ret = __f2fs_get_cluster_blocks(inode, &dn); + } + fail: + f2fs_put_dnode(&dn); +@@ -984,15 +993,33 @@ static int __f2fs_cluster_blocks(struct inode *inode, + /* return # of compressed blocks in compressed cluster */ + static int f2fs_compressed_blocks(struct compress_ctx *cc) + { +- return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true); ++ return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, ++ CLUSTER_COMPR_BLKS); + } + +-/* return # of valid blocks in compressed cluster */ ++/* return # of raw blocks in non-compressed cluster */ ++static int f2fs_decompressed_blocks(struct inode *inode, ++ unsigned int cluster_idx) ++{ ++ return __f2fs_cluster_blocks(inode, cluster_idx, ++ CLUSTER_RAW_BLKS); ++} ++ ++/* return whether cluster is compressed one or not */ + int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index) + { + return __f2fs_cluster_blocks(inode, + index >> F2FS_I(inode)->i_log_cluster_size, +- false); ++ CLUSTER_IS_COMPR); ++} ++ ++/* return whether cluster contains non raw blocks or not */ ++bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index) ++{ ++ unsigned int cluster_idx = index >> 
F2FS_I(inode)->i_log_cluster_size; ++ ++ return f2fs_decompressed_blocks(inode, cluster_idx) != ++ F2FS_I(inode)->i_cluster_size; + } + + static bool cluster_may_compress(struct compress_ctx *cc) +diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c +index 84fc87018180f7..1c59a3b2b2c348 100644 +--- a/fs/f2fs/data.c ++++ b/fs/f2fs/data.c +@@ -1614,9 +1614,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag) + map->m_flags |= F2FS_MAP_NEW; + } else if (is_hole) { + if (f2fs_compressed_file(inode) && +- f2fs_sanity_check_cluster(&dn) && +- (flag != F2FS_GET_BLOCK_FIEMAP || +- IS_ENABLED(CONFIG_F2FS_CHECK_FS))) { ++ f2fs_sanity_check_cluster(&dn)) { + err = -EFSCORRUPTED; + f2fs_handle_error(sbi, + ERROR_CORRUPTED_CLUSTER); +@@ -2623,10 +2621,13 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio) + struct dnode_of_data dn; + struct node_info ni; + bool ipu_force = false; ++ bool atomic_commit; + int err = 0; + + /* Use COW inode to make dnode_of_data for atomic write */ +- if (f2fs_is_atomic_file(inode)) ++ atomic_commit = f2fs_is_atomic_file(inode) && ++ page_private_atomic(fio->page); ++ if (atomic_commit) + set_new_dnode(&dn, F2FS_I(inode)->cow_inode, NULL, NULL, 0); + else + set_new_dnode(&dn, inode, NULL, NULL, 0); +@@ -2730,6 +2731,8 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio) + f2fs_outplace_write_data(&dn, fio); + trace_f2fs_do_write_data_page(page, OPU); + set_inode_flag(inode, FI_APPEND_WRITE); ++ if (atomic_commit) ++ clear_page_private_atomic(page); + out_writepage: + f2fs_put_dnode(&dn); + out: +@@ -3700,6 +3703,9 @@ static int f2fs_write_end(struct file *file, + + set_page_dirty(page); + ++ if (f2fs_is_atomic_file(inode)) ++ set_page_private_atomic(page); ++ + if (pos + copied > i_size_read(inode) && + !f2fs_verity_in_progress(inode)) { + f2fs_i_size_write(inode, pos + copied); +diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c +index c624ffff6f19a1..166ec8942595e1 100644 +--- a/fs/f2fs/dir.c ++++ b/fs/f2fs/dir.c +@@ -157,7 +157,8 @@ static unsigned long dir_block_index(unsigned int level, + unsigned long bidx = 0; + + for (i = 0; i < level; i++) +- bidx += dir_buckets(i, dir_level) * bucket_blocks(i); ++ bidx += mul_u32_u32(dir_buckets(i, dir_level), ++ bucket_blocks(i)); + bidx += idx * bucket_blocks(level); + return bidx; + } +diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c +index 6a9a470345bfc7..d6fb053b6dfbbe 100644 +--- a/fs/f2fs/extent_cache.c ++++ b/fs/f2fs/extent_cache.c +@@ -367,7 +367,7 @@ static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi, + static void __drop_largest_extent(struct extent_tree *et, + pgoff_t fofs, unsigned int len) + { +- if (fofs < et->largest.fofs + et->largest.len && ++ if (fofs < (pgoff_t)et->largest.fofs + et->largest.len && + fofs + len > et->largest.fofs) { + et->largest.len = 0; + et->largest_updated = true; +@@ -457,7 +457,7 @@ static bool __lookup_extent_tree(struct inode *inode, pgoff_t pgofs, + + if (type == EX_READ && + et->largest.fofs <= pgofs && +- et->largest.fofs + et->largest.len > pgofs) { ++ (pgoff_t)et->largest.fofs + et->largest.len > pgofs) { + *ei = et->largest; + ret = true; + stat_inc_largest_node_hit(sbi); +diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h +index 6371b295fba68f..7faf9446ea5dcb 100644 +--- a/fs/f2fs/f2fs.h ++++ b/fs/f2fs/f2fs.h +@@ -282,6 +282,7 @@ enum { + APPEND_INO, /* for append ino list */ + UPDATE_INO, /* for update ino list */ + TRANS_DIR_INO, /* for transactions dir ino list */ ++ XATTR_DIR_INO, /* for xattr updated dir ino list */ + FLUSH_INO, 
/* for multiple device flushing */ + MAX_INO_ENTRY, /* max. list */ + }; +@@ -779,7 +780,6 @@ enum { + FI_NEED_IPU, /* used for ipu per file */ + FI_ATOMIC_FILE, /* indicate atomic file */ + FI_DATA_EXIST, /* indicate data exists */ +- FI_INLINE_DOTS, /* indicate inline dot dentries */ + FI_SKIP_WRITES, /* should skip data page writeback */ + FI_OPU_WRITE, /* used for opu per file */ + FI_DIRTY_FILE, /* indicate regular/symlink has dirty pages */ +@@ -797,6 +797,7 @@ enum { + FI_ALIGNED_WRITE, /* enable aligned write */ + FI_COW_FILE, /* indicate COW file */ + FI_ATOMIC_COMMITTED, /* indicate atomic commit completed except disk sync */ ++ FI_ATOMIC_DIRTIED, /* indicate atomic file is dirtied */ + FI_ATOMIC_REPLACE, /* indicate atomic replace */ + FI_OPENED_FILE, /* indicate file has been opened */ + FI_MAX, /* max flag, never be used */ +@@ -1149,6 +1150,7 @@ enum cp_reason_type { + CP_FASTBOOT_MODE, + CP_SPEC_LOG_NUM, + CP_RECOVER_DIR, ++ CP_XATTR_DIR, + }; + + enum iostat_type { +@@ -1411,7 +1413,8 @@ static inline void f2fs_clear_bit(unsigned int nr, char *addr); + * bit 1 PAGE_PRIVATE_ONGOING_MIGRATION + * bit 2 PAGE_PRIVATE_INLINE_INODE + * bit 3 PAGE_PRIVATE_REF_RESOURCE +- * bit 4- f2fs private data ++ * bit 4 PAGE_PRIVATE_ATOMIC_WRITE ++ * bit 5- f2fs private data + * + * Layout B: lowest bit should be 0 + * page.private is a wrapped pointer. +@@ -1421,6 +1424,7 @@ enum { + PAGE_PRIVATE_ONGOING_MIGRATION, /* data page which is on-going migrating */ + PAGE_PRIVATE_INLINE_INODE, /* inode page contains inline data */ + PAGE_PRIVATE_REF_RESOURCE, /* dirty page has referenced resources */ ++ PAGE_PRIVATE_ATOMIC_WRITE, /* data page from atomic write path */ + PAGE_PRIVATE_MAX + }; + +@@ -2386,14 +2390,17 @@ static inline void clear_page_private_##name(struct page *page) \ + PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER); + PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE); + PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION); ++PAGE_PRIVATE_GET_FUNC(atomic, ATOMIC_WRITE); + + PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE); + PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE); + PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION); ++PAGE_PRIVATE_SET_FUNC(atomic, ATOMIC_WRITE); + + PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE); + PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE); + PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION); ++PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE); + + static inline unsigned long get_page_private_data(struct page *page) + { +@@ -2425,6 +2432,7 @@ static inline void clear_page_private_all(struct page *page) + clear_page_private_reference(page); + clear_page_private_gcing(page); + clear_page_private_inline(page); ++ clear_page_private_atomic(page); + + f2fs_bug_on(F2FS_P_SB(page), page_private(page)); + } +@@ -3028,10 +3036,8 @@ static inline void __mark_inode_dirty_flag(struct inode *inode, + return; + fallthrough; + case FI_DATA_EXIST: +- case FI_INLINE_DOTS: + case FI_PIN_FILE: + case FI_COMPRESS_RELEASED: +- case FI_ATOMIC_COMMITTED: + f2fs_mark_inode_dirty_sync(inode, true); + } + } +@@ -3153,8 +3159,6 @@ static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri) + set_bit(FI_INLINE_DENTRY, fi->flags); + if (ri->i_inline & F2FS_DATA_EXIST) + set_bit(FI_DATA_EXIST, fi->flags); +- if (ri->i_inline & F2FS_INLINE_DOTS) +- set_bit(FI_INLINE_DOTS, fi->flags); + if (ri->i_inline & F2FS_EXTRA_ATTR) + set_bit(FI_EXTRA_ATTR, fi->flags); + if (ri->i_inline & F2FS_PIN_FILE) +@@ -3175,8 +3179,6 @@ static inline void set_raw_inline(struct inode *inode, struct f2fs_inode 
*ri) + ri->i_inline |= F2FS_INLINE_DENTRY; + if (is_inode_flag_set(inode, FI_DATA_EXIST)) + ri->i_inline |= F2FS_DATA_EXIST; +- if (is_inode_flag_set(inode, FI_INLINE_DOTS)) +- ri->i_inline |= F2FS_INLINE_DOTS; + if (is_inode_flag_set(inode, FI_EXTRA_ATTR)) + ri->i_inline |= F2FS_EXTRA_ATTR; + if (is_inode_flag_set(inode, FI_PIN_FILE)) +@@ -3263,11 +3265,6 @@ static inline int f2fs_exist_data(struct inode *inode) + return is_inode_flag_set(inode, FI_DATA_EXIST); + } + +-static inline int f2fs_has_inline_dots(struct inode *inode) +-{ +- return is_inode_flag_set(inode, FI_INLINE_DOTS); +-} +- + static inline int f2fs_is_mmap_file(struct inode *inode) + { + return is_inode_flag_set(inode, FI_MMAP_FILE); +@@ -3373,17 +3370,6 @@ static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi) + return is_set_ckpt_flags(sbi, CP_ERROR_FLAG); + } + +-static inline bool is_dot_dotdot(const u8 *name, size_t len) +-{ +- if (len == 1 && name[0] == '.') +- return true; +- +- if (len == 2 && name[0] == '.' && name[1] == '.') +- return true; +- +- return false; +-} +- + static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi, + size_t size, gfp_t flags) + { +@@ -3499,6 +3485,8 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, + struct iattr *attr); + int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end); + void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count); ++int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag, ++ bool readonly, bool need_lock); + int f2fs_precache_extents(struct inode *inode); + int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa); + int f2fs_fileattr_set(struct mnt_idmap *idmap, +@@ -4274,6 +4262,11 @@ static inline bool f2fs_meta_inode_gc_required(struct inode *inode) + * compress.c + */ + #ifdef CONFIG_F2FS_FS_COMPRESSION ++enum cluster_check_type { ++ CLUSTER_IS_COMPR, /* check only if compressed cluster */ ++ CLUSTER_COMPR_BLKS, /* return # of compressed blocks in a cluster */ ++ CLUSTER_RAW_BLKS /* return # of raw blocks in a cluster */ ++}; + bool f2fs_is_compressed_page(struct page *page); + struct page *f2fs_compress_control_page(struct page *page); + int f2fs_prepare_compress_overwrite(struct inode *inode, +@@ -4300,6 +4293,7 @@ int f2fs_write_multi_pages(struct compress_ctx *cc, + struct writeback_control *wbc, + enum iostat_type io_type); + int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index); ++bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index); + void f2fs_update_read_extent_tree_range_compressed(struct inode *inode, + pgoff_t fofs, block_t blkaddr, + unsigned int llen, unsigned int c_len); +@@ -4386,6 +4380,12 @@ static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, + static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, + nid_t ino) { } + #define inc_compr_inode_stat(inode) do { } while (0) ++static inline int f2fs_is_compressed_cluster( ++ struct inode *inode, ++ pgoff_t index) { return 0; } ++static inline bool f2fs_is_sparse_cluster( ++ struct inode *inode, ++ pgoff_t index) { return true; } + static inline void f2fs_update_read_extent_tree_range_compressed( + struct inode *inode, + pgoff_t fofs, block_t blkaddr, +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c +index 523896200908af..74fac935bd0923 100644 +--- a/fs/f2fs/file.c ++++ b/fs/f2fs/file.c +@@ -213,6 +213,9 @@ static inline enum cp_reason_type need_do_checkpoint(struct inode *inode) + f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino, + TRANS_DIR_INO)) + 
cp_reason = CP_RECOVER_DIR; ++ else if (f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino, ++ XATTR_DIR_INO)) ++ cp_reason = CP_XATTR_DIR; + + return cp_reason; + } +@@ -2094,10 +2097,12 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate) + struct mnt_idmap *idmap = file_mnt_idmap(filp); + struct f2fs_inode_info *fi = F2FS_I(inode); + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); +- struct inode *pinode; + loff_t isize; + int ret; + ++ if (!(filp->f_mode & FMODE_WRITE)) ++ return -EBADF; ++ + if (!inode_owner_or_capable(idmap, inode)) + return -EACCES; + +@@ -2143,15 +2148,10 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate) + /* Check if the inode already has a COW inode */ + if (fi->cow_inode == NULL) { + /* Create a COW inode for atomic write */ +- pinode = f2fs_iget(inode->i_sb, fi->i_pino); +- if (IS_ERR(pinode)) { +- f2fs_up_write(&fi->i_gc_rwsem[WRITE]); +- ret = PTR_ERR(pinode); +- goto out; +- } ++ struct dentry *dentry = file_dentry(filp); ++ struct inode *dir = d_inode(dentry->d_parent); + +- ret = f2fs_get_tmpfile(idmap, pinode, &fi->cow_inode); +- iput(pinode); ++ ret = f2fs_get_tmpfile(idmap, dir, &fi->cow_inode); + if (ret) { + f2fs_up_write(&fi->i_gc_rwsem[WRITE]); + goto out; +@@ -2164,6 +2164,10 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate) + F2FS_I(fi->cow_inode)->atomic_inode = inode; + } else { + /* Reuse the already created COW inode */ ++ f2fs_bug_on(sbi, get_dirty_pages(fi->cow_inode)); ++ ++ invalidate_mapping_pages(fi->cow_inode->i_mapping, 0, -1); ++ + ret = f2fs_do_truncate_blocks(fi->cow_inode, 0, true); + if (ret) { + f2fs_up_write(&fi->i_gc_rwsem[WRITE]); +@@ -2205,6 +2209,9 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp) + struct mnt_idmap *idmap = file_mnt_idmap(filp); + int ret; + ++ if (!(filp->f_mode & FMODE_WRITE)) ++ return -EBADF; ++ + if (!inode_owner_or_capable(idmap, inode)) + return -EACCES; + +@@ -2237,6 +2244,9 @@ static int f2fs_ioc_abort_atomic_write(struct file *filp) + struct mnt_idmap *idmap = file_mnt_idmap(filp); + int ret; + ++ if (!(filp->f_mode & FMODE_WRITE)) ++ return -EBADF; ++ + if (!inode_owner_or_capable(idmap, inode)) + return -EACCES; + +@@ -2255,34 +2265,13 @@ static int f2fs_ioc_abort_atomic_write(struct file *filp) + return ret; + } + +-static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg) ++int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag, ++ bool readonly, bool need_lock) + { +- struct inode *inode = file_inode(filp); +- struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + struct super_block *sb = sbi->sb; +- __u32 in; + int ret = 0; + +- if (!capable(CAP_SYS_ADMIN)) +- return -EPERM; +- +- if (get_user(in, (__u32 __user *)arg)) +- return -EFAULT; +- +- if (in != F2FS_GOING_DOWN_FULLSYNC) { +- ret = mnt_want_write_file(filp); +- if (ret) { +- if (ret == -EROFS) { +- ret = 0; +- f2fs_stop_checkpoint(sbi, false, +- STOP_CP_REASON_SHUTDOWN); +- trace_f2fs_shutdown(sbi, in, ret); +- } +- return ret; +- } +- } +- +- switch (in) { ++ switch (flag) { + case F2FS_GOING_DOWN_FULLSYNC: + ret = freeze_bdev(sb->s_bdev); + if (ret) +@@ -2316,18 +2305,62 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg) + goto out; + } + ++ if (readonly) ++ goto out; ++ ++ /* grab sb->s_umount to avoid racing w/ remount() */ ++ if (need_lock) ++ down_read(&sbi->sb->s_umount); ++ + f2fs_stop_gc_thread(sbi); + f2fs_stop_discard_thread(sbi); + + f2fs_drop_discard_cmd(sbi); + clear_opt(sbi, DISCARD); + ++ if (need_lock) ++ 
up_read(&sbi->sb->s_umount); ++ + f2fs_update_time(sbi, REQ_TIME); + out: +- if (in != F2FS_GOING_DOWN_FULLSYNC) +- mnt_drop_write_file(filp); + +- trace_f2fs_shutdown(sbi, in, ret); ++ trace_f2fs_shutdown(sbi, flag, ret); ++ ++ return ret; ++} ++ ++static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg) ++{ ++ struct inode *inode = file_inode(filp); ++ struct f2fs_sb_info *sbi = F2FS_I_SB(inode); ++ __u32 in; ++ int ret; ++ bool need_drop = false, readonly = false; ++ ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ ++ if (get_user(in, (__u32 __user *)arg)) ++ return -EFAULT; ++ ++ if (in != F2FS_GOING_DOWN_FULLSYNC) { ++ ret = mnt_want_write_file(filp); ++ if (ret) { ++ if (ret != -EROFS) ++ return ret; ++ ++ /* fallback to nosync shutdown for readonly fs */ ++ in = F2FS_GOING_DOWN_NOSYNC; ++ readonly = true; ++ } else { ++ need_drop = true; ++ } ++ } ++ ++ ret = f2fs_do_shutdown(sbi, in, readonly, true); ++ ++ if (need_drop) ++ mnt_drop_write_file(filp); + + return ret; + } +@@ -2640,7 +2673,8 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi, + + inode_lock(inode); + +- if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) { ++ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED) || ++ f2fs_is_atomic_file(inode)) { + err = -EINVAL; + goto unlock_out; + } +@@ -2663,7 +2697,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi, + * block addresses are continuous. + */ + if (f2fs_lookup_read_extent_cache(inode, pg_start, &ei)) { +- if (ei.fofs + ei.len >= pg_end) ++ if ((pgoff_t)ei.fofs + ei.len >= pg_end) + goto out; + } + +@@ -2746,6 +2780,8 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi, + goto clear_out; + } + ++ f2fs_wait_on_page_writeback(page, DATA, true, true); ++ + set_page_dirty(page); + set_page_private_gcing(page); + f2fs_put_page(page, 1); +@@ -2869,6 +2905,11 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in, + goto out_unlock; + } + ++ if (f2fs_is_atomic_file(src) || f2fs_is_atomic_file(dst)) { ++ ret = -EINVAL; ++ goto out_unlock; ++ } ++ + ret = -EINVAL; + if (pos_in + len > src->i_size || pos_in + len < pos_in) + goto out_unlock; +@@ -3253,6 +3294,11 @@ static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg) + + inode_lock(inode); + ++ if (f2fs_is_atomic_file(inode)) { ++ ret = -EINVAL; ++ goto out; ++ } ++ + if (!pin) { + clear_inode_flag(inode, FI_PIN_FILE); + f2fs_i_gc_failures_write(inode, 0); +@@ -4140,6 +4186,8 @@ static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len) + /* It will never fail, when page has pinned above */ + f2fs_bug_on(F2FS_I_SB(inode), !page); + ++ f2fs_wait_on_page_writeback(page, DATA, true, true); ++ + set_page_dirty(page); + set_page_private_gcing(page); + f2fs_put_page(page, 1); +@@ -4154,9 +4202,8 @@ static int f2fs_ioc_decompress_file(struct file *filp) + struct inode *inode = file_inode(filp); + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + struct f2fs_inode_info *fi = F2FS_I(inode); +- pgoff_t page_idx = 0, last_idx; +- int cluster_size = fi->i_cluster_size; +- int count, ret; ++ pgoff_t page_idx = 0, last_idx, cluster_idx; ++ int ret; + + if (!f2fs_sb_has_compression(sbi) || + F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER) +@@ -4189,10 +4236,15 @@ static int f2fs_ioc_decompress_file(struct file *filp) + goto out; + + last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); ++ last_idx >>= fi->i_log_cluster_size; ++ ++ for (cluster_idx = 0; cluster_idx < last_idx; cluster_idx++) { ++ page_idx = cluster_idx << fi->i_log_cluster_size; ++ 
++ if (!f2fs_is_compressed_cluster(inode, page_idx)) ++ continue; + +- count = last_idx - page_idx; +- while (count && count >= cluster_size) { +- ret = redirty_blocks(inode, page_idx, cluster_size); ++ ret = redirty_blocks(inode, page_idx, fi->i_cluster_size); + if (ret < 0) + break; + +@@ -4202,9 +4254,6 @@ static int f2fs_ioc_decompress_file(struct file *filp) + break; + } + +- count -= cluster_size; +- page_idx += cluster_size; +- + cond_resched(); + if (fatal_signal_pending(current)) { + ret = -EINTR; +@@ -4230,9 +4279,9 @@ static int f2fs_ioc_compress_file(struct file *filp) + { + struct inode *inode = file_inode(filp); + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); +- pgoff_t page_idx = 0, last_idx; +- int cluster_size = F2FS_I(inode)->i_cluster_size; +- int count, ret; ++ struct f2fs_inode_info *fi = F2FS_I(inode); ++ pgoff_t page_idx = 0, last_idx, cluster_idx; ++ int ret; + + if (!f2fs_sb_has_compression(sbi) || + F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER) +@@ -4264,10 +4313,15 @@ static int f2fs_ioc_compress_file(struct file *filp) + set_inode_flag(inode, FI_ENABLE_COMPRESS); + + last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); ++ last_idx >>= fi->i_log_cluster_size; ++ ++ for (cluster_idx = 0; cluster_idx < last_idx; cluster_idx++) { ++ page_idx = cluster_idx << fi->i_log_cluster_size; + +- count = last_idx - page_idx; +- while (count && count >= cluster_size) { +- ret = redirty_blocks(inode, page_idx, cluster_size); ++ if (f2fs_is_sparse_cluster(inode, page_idx)) ++ continue; ++ ++ ret = redirty_blocks(inode, page_idx, fi->i_cluster_size); + if (ret < 0) + break; + +@@ -4277,9 +4331,6 @@ static int f2fs_ioc_compress_file(struct file *filp) + break; + } + +- count -= cluster_size; +- page_idx += cluster_size; +- + cond_resched(); + if (fatal_signal_pending(current)) { + ret = -EINTR; +@@ -4538,6 +4589,10 @@ static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) + f2fs_trace_rw_file_path(iocb->ki_filp, iocb->ki_pos, + iov_iter_count(to), READ); + ++ /* In LFS mode, if there is inflight dio, wait for its completion */ ++ if (f2fs_lfs_mode(F2FS_I_SB(inode))) ++ inode_dio_wait(inode); ++ + if (f2fs_should_use_dio(inode, iocb, to)) { + ret = f2fs_dio_read_iter(iocb, to); + } else { +diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c +index 709b2f79872f27..a3e0c927354331 100644 +--- a/fs/f2fs/inode.c ++++ b/fs/f2fs/inode.c +@@ -35,6 +35,11 @@ void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync) + if (f2fs_inode_dirtied(inode, sync)) + return; + ++ if (f2fs_is_atomic_file(inode)) { ++ set_inode_flag(inode, FI_ATOMIC_DIRTIED); ++ return; ++ } ++ + mark_inode_dirty_sync(inode); + } + +diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c +index 7bca22e5dec4f3..2e08e1fdf485c7 100644 +--- a/fs/f2fs/namei.c ++++ b/fs/f2fs/namei.c +@@ -455,63 +455,6 @@ struct dentry *f2fs_get_parent(struct dentry *child) + return d_obtain_alias(f2fs_iget(child->d_sb, ino)); + } + +-static int __recover_dot_dentries(struct inode *dir, nid_t pino) +-{ +- struct f2fs_sb_info *sbi = F2FS_I_SB(dir); +- struct qstr dot = QSTR_INIT(".", 1); +- struct qstr dotdot = QSTR_INIT("..", 2); +- struct f2fs_dir_entry *de; +- struct page *page; +- int err = 0; +- +- if (f2fs_readonly(sbi->sb)) { +- f2fs_info(sbi, "skip recovering inline_dots inode (ino:%lu, pino:%u) in readonly mountpoint", +- dir->i_ino, pino); +- return 0; +- } +- +- if (!S_ISDIR(dir->i_mode)) { +- f2fs_err(sbi, "inconsistent inode status, skip recovering inline_dots inode (ino:%lu, i_mode:%u, pino:%u)", +- dir->i_ino, 
dir->i_mode, pino); +- set_sbi_flag(sbi, SBI_NEED_FSCK); +- return -ENOTDIR; +- } +- +- err = f2fs_dquot_initialize(dir); +- if (err) +- return err; +- +- f2fs_balance_fs(sbi, true); +- +- f2fs_lock_op(sbi); +- +- de = f2fs_find_entry(dir, &dot, &page); +- if (de) { +- f2fs_put_page(page, 0); +- } else if (IS_ERR(page)) { +- err = PTR_ERR(page); +- goto out; +- } else { +- err = f2fs_do_add_link(dir, &dot, NULL, dir->i_ino, S_IFDIR); +- if (err) +- goto out; +- } +- +- de = f2fs_find_entry(dir, &dotdot, &page); +- if (de) +- f2fs_put_page(page, 0); +- else if (IS_ERR(page)) +- err = PTR_ERR(page); +- else +- err = f2fs_do_add_link(dir, &dotdot, NULL, pino, S_IFDIR); +-out: +- if (!err) +- clear_inode_flag(dir, FI_INLINE_DOTS); +- +- f2fs_unlock_op(sbi); +- return err; +-} +- + static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, + unsigned int flags) + { +@@ -521,7 +464,6 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, + struct dentry *new; + nid_t ino = -1; + int err = 0; +- unsigned int root_ino = F2FS_ROOT_INO(F2FS_I_SB(dir)); + struct f2fs_filename fname; + + trace_f2fs_lookup_start(dir, dentry, flags); +@@ -558,17 +500,6 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, + goto out; + } + +- if ((dir->i_ino == root_ino) && f2fs_has_inline_dots(dir)) { +- err = __recover_dot_dentries(dir, root_ino); +- if (err) +- goto out_iput; +- } +- +- if (f2fs_has_inline_dots(inode)) { +- err = __recover_dot_dentries(inode, dir->i_ino); +- if (err) +- goto out_iput; +- } + if (IS_ENCRYPTED(dir) && + (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) && + !fscrypt_has_permitted_context(dir, inode)) { +diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c +index e3e2c0b2f49597..c0ba379a6d8f3e 100644 +--- a/fs/f2fs/segment.c ++++ b/fs/f2fs/segment.c +@@ -199,6 +199,10 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean) + clear_inode_flag(inode, FI_ATOMIC_COMMITTED); + clear_inode_flag(inode, FI_ATOMIC_REPLACE); + clear_inode_flag(inode, FI_ATOMIC_FILE); ++ if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) { ++ clear_inode_flag(inode, FI_ATOMIC_DIRTIED); ++ f2fs_mark_inode_dirty_sync(inode, true); ++ } + stat_dec_atomic_inode(inode); + + F2FS_I(inode)->atomic_write_task = NULL; +@@ -368,6 +372,10 @@ static int __f2fs_commit_atomic_write(struct inode *inode) + } else { + sbi->committed_atomic_block += fi->atomic_write_cnt; + set_inode_flag(inode, FI_ATOMIC_COMMITTED); ++ if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) { ++ clear_inode_flag(inode, FI_ATOMIC_DIRTIED); ++ f2fs_mark_inode_dirty_sync(inode, true); ++ } + } + + __complete_revoke_list(inode, &revoke_list, ret ? 
true : false);
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index e022d8233c0a5f..540fa1dfc77dff 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -2543,6 +2543,11 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
+ return err;
+ }
+
++static void f2fs_shutdown(struct super_block *sb)
++{
++ f2fs_do_shutdown(F2FS_SB(sb), F2FS_GOING_DOWN_NOSYNC, false, false);
++}
++
+ #ifdef CONFIG_QUOTA
+ static bool f2fs_need_recovery(struct f2fs_sb_info *sbi)
+ {
+@@ -3142,6 +3147,7 @@ static const struct super_operations f2fs_sops = {
+ .unfreeze_fs = f2fs_unfreeze,
+ .statfs = f2fs_statfs,
+ .remount_fs = f2fs_remount,
++ .shutdown = f2fs_shutdown,
+ };
+
+ #ifdef CONFIG_FS_ENCRYPTION
+@@ -3330,9 +3336,9 @@ static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
+ u32 segment_count = le32_to_cpu(raw_super->segment_count);
+ u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
+ u64 main_end_blkaddr = main_blkaddr +
+- (segment_count_main << log_blocks_per_seg);
++ ((u64)segment_count_main << log_blocks_per_seg);
+ u64 seg_end_blkaddr = segment0_blkaddr +
+- (segment_count << log_blocks_per_seg);
++ ((u64)segment_count << log_blocks_per_seg);
+
+ if (segment0_blkaddr != cp_blkaddr) {
+ f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
+@@ -4131,12 +4137,14 @@ void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason,
+ }
+
+ f2fs_warn(sbi, "Remounting filesystem read-only");
++
+ /*
+- * Make sure updated value of ->s_mount_flags will be visible before
+- * ->s_flags update
++ * We have already set CP_ERROR_FLAG flag to stop all updates
++ * to filesystem, so it doesn't need to set SB_RDONLY flag here
++ * because the flag should be set covered w/ sb->s_umount semaphore
++ * via remount procedure, otherwise, it will confuse code like
++ * freeze_super() which will lead to deadlocks and other problems.
+ */
+- smp_wmb();
+- sb->s_flags |= SB_RDONLY;
+ }
+
+ static void f2fs_record_error_work(struct work_struct *work)
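The two (u64) casts in the sanity_check_area_boundary() hunk above are the entire fix: segment_count, segment_count_main and log_blocks_per_seg are all u32, so the shift is evaluated in 32-bit arithmetic and can wrap before it is widened for the u64 assignment. A minimal userspace sketch of the failure mode, not from the kernel tree; the counts are hypothetical and chosen only so the shift exceeds 32 bits:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t segment_count = 16 * 1024 * 1024; /* hypothetical: 2^24 segments */
        uint32_t log_blocks_per_seg = 9;           /* 512 blocks per segment */

        /* Shift happens in 32 bits: 2^24 << 9 == 2^33 wraps to 0. */
        uint64_t wrong = segment_count << log_blocks_per_seg;
        /* Widen first, as the patch does: yields the expected 2^33. */
        uint64_t right = (uint64_t)segment_count << log_blocks_per_seg;

        printf("wrong=%llu right=%llu\n",
               (unsigned long long)wrong, (unsigned long long)right);
        return 0;
}

The same widen-before-shifting pattern recurs elsewhere in this series, e.g. the (pgoff_t) casts in extent_cache.c and mul_u32_u32() in dir.c.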
+diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
+index e47cc917d11842..54ab9caaae4dee 100644
+--- a/fs/f2fs/xattr.c
++++ b/fs/f2fs/xattr.c
+@@ -629,6 +629,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
+ const char *name, const void *value, size_t size,
+ struct page *ipage, int flags)
+ {
++ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_xattr_entry *here, *last;
+ void *base_addr, *last_base_addr;
+ int found, newsize;
+@@ -772,9 +773,18 @@ static int __f2fs_setxattr(struct inode *inode, int index,
+ if (index == F2FS_XATTR_INDEX_ENCRYPTION &&
+ !strcmp(name, F2FS_XATTR_NAME_ENCRYPTION_CONTEXT))
+ f2fs_set_encrypted_inode(inode);
+- if (S_ISDIR(inode->i_mode))
+- set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_CP);
+
++ if (!S_ISDIR(inode->i_mode))
++ goto same;
++ /*
++ * In strict fsync mode, fsync() always tries to trigger a checkpoint
++ * for full metadata consistency; in other modes, it triggers a
++ * checkpoint when a parent directory's xattr metadata was updated.
++ */
++ if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
++ set_sbi_flag(sbi, SBI_NEED_CP);
++ else
++ f2fs_add_ino_entry(sbi, inode->i_ino, XATTR_DIR_INO);
+ same:
+ if (is_inode_flag_set(inode, FI_ACL_MODE)) {
+ inode->i_mode = F2FS_I(inode)->i_acl_mode;
+diff --git a/fs/fcntl.c b/fs/fcntl.c
+index 9f606714d081b2..1484f062ee65e3 100644
+--- a/fs/fcntl.c
++++ b/fs/fcntl.c
+@@ -86,8 +86,8 @@ static int setfl(int fd, struct file * filp, unsigned int arg)
+ return error;
+ }
+
+-static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
+- int force)
++void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
++ int force)
+ {
+ write_lock_irq(&filp->f_owner.lock);
+ if (force || !filp->f_owner.pid) {
+@@ -97,19 +97,13 @@ static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
+
+ if (pid) {
+ const struct cred *cred = current_cred();
++ security_file_set_fowner(filp);
+ filp->f_owner.uid = cred->uid;
+ filp->f_owner.euid = cred->euid;
+ }
+ }
+ write_unlock_irq(&filp->f_owner.lock);
+ }
+-
+-void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
+- int force)
+-{
+- security_file_set_fowner(filp);
+- f_modown(filp, pid, type, force);
+-}
+ EXPORT_SYMBOL(__f_setown);
+
+ int f_setown(struct file *filp, int who, int force)
+@@ -145,7 +139,7 @@ EXPORT_SYMBOL(f_setown);
+
+ void f_delown(struct file *filp)
+ {
+- f_modown(filp, NULL, PIDTYPE_TGID, 1);
++ __f_setown(filp, NULL, PIDTYPE_TGID, 1);
+ }
+
+ pid_t f_getown(struct file *filp)
+diff --git a/fs/inode.c b/fs/inode.c
+index 776c2a049d07a1..9cafde77e2b038 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -757,6 +757,10 @@ void evict_inodes(struct super_block *sb)
+ continue;
+
+ spin_lock(&inode->i_lock);
++ if (atomic_read(&inode->i_count)) {
++ spin_unlock(&inode->i_lock);
++ continue;
++ }
+ if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
+ spin_unlock(&inode->i_lock);
+ continue;
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index 5713994328cbcb..0625d1c0d0649a 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -187,7 +187,7 @@ int dbMount(struct inode *ipbmap)
+ }
+
+ bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag);
+- if (!bmp->db_numag) {
++ if (!bmp->db_numag || bmp->db_numag >= MAXAG) {
+ err = -EINVAL;
+ goto err_release_metapage;
+ }
+@@ -652,7 +652,7 @@ int dbNextAG(struct inode *ipbmap)
+ * average free space.
+ */ + for (i = 0 ; i < bmp->db_numag; i++, agpref++) { +- if (agpref == bmp->db_numag) ++ if (agpref >= bmp->db_numag) + agpref = 0; + + if (atomic_read(&bmp->db_active[agpref])) +diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c +index 82d88dcf0ea6bf..b30e4cf2f5794c 100644 +--- a/fs/jfs/jfs_imap.c ++++ b/fs/jfs/jfs_imap.c +@@ -1360,7 +1360,7 @@ int diAlloc(struct inode *pip, bool dir, struct inode *ip) + /* get the ag number of this iag */ + agno = BLKTOAG(JFS_IP(pip)->agstart, JFS_SBI(pip->i_sb)); + dn_numag = JFS_SBI(pip->i_sb)->bmap->db_numag; +- if (agno < 0 || agno > dn_numag) ++ if (agno < 0 || agno > dn_numag || agno >= MAXAG) + return -EIO; + + if (atomic_read(&JFS_SBI(pip->i_sb)->bmap->db_active[agno])) { +diff --git a/fs/namei.c b/fs/namei.c +index e728ba085ebee7..beffbb02a24e67 100644 +--- a/fs/namei.c ++++ b/fs/namei.c +@@ -2667,10 +2667,8 @@ static int lookup_one_common(struct mnt_idmap *idmap, + if (!len) + return -EACCES; + +- if (unlikely(name[0] == '.')) { +- if (len < 2 || (len == 2 && name[1] == '.')) +- return -EACCES; +- } ++ if (is_dot_dotdot(name, len)) ++ return -EACCES; + + while (len--) { + unsigned int c = *(const unsigned char *)name++; +diff --git a/fs/namespace.c b/fs/namespace.c +index e6c61d4997ccf7..b4385e2413d599 100644 +--- a/fs/namespace.c ++++ b/fs/namespace.c +@@ -2796,8 +2796,15 @@ static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount * + if (!__mnt_is_readonly(mnt) && + (!(sb->s_iflags & SB_I_TS_EXPIRY_WARNED)) && + (ktime_get_real_seconds() + TIME_UPTIME_SEC_MAX > sb->s_time_max)) { +- char *buf = (char *)__get_free_page(GFP_KERNEL); +- char *mntpath = buf ? d_path(mountpoint, buf, PAGE_SIZE) : ERR_PTR(-ENOMEM); ++ char *buf, *mntpath; ++ ++ buf = (char *)__get_free_page(GFP_KERNEL); ++ if (buf) ++ mntpath = d_path(mountpoint, buf, PAGE_SIZE); ++ else ++ mntpath = ERR_PTR(-ENOMEM); ++ if (IS_ERR(mntpath)) ++ mntpath = "(unknown)"; + + pr_warn("%s filesystem being %s at %s supports timestamps until %ptTd (0x%llx)\n", + sb->s_type->name, +@@ -2805,8 +2812,9 @@ static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount * + mntpath, &sb->s_time_max, + (unsigned long long)sb->s_time_max); + +- free_page((unsigned long)buf); + sb->s_iflags |= SB_I_TS_EXPIRY_WARNED; ++ if (buf) ++ free_page((unsigned long)buf); + } + } + +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c +index c95c50328ced89..6c4539ed2654ac 100644 +--- a/fs/nfs/nfs4state.c ++++ b/fs/nfs/nfs4state.c +@@ -1957,6 +1957,7 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov + set_bit(ops->owner_flag_bit, &sp->so_flags); + nfs4_put_state_owner(sp); + status = nfs4_recovery_handle_error(clp, status); ++ nfs4_free_state_owners(&freeme); + return (status != 0) ? 
status : -EAGAIN; + } + +diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c +index 07bf219f9ae482..5c9f8f8404d5b0 100644 +--- a/fs/nfsd/filecache.c ++++ b/fs/nfsd/filecache.c +@@ -1040,8 +1040,6 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp, + if (likely(ret == 0)) + goto open_file; + +- if (ret == -EEXIST) +- goto retry; + trace_nfsd_file_insert_err(rqstp, inode, may_flags, ret); + status = nfserr_jukebox; + goto construction_err; +@@ -1056,6 +1054,7 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp, + status = nfserr_jukebox; + goto construction_err; + } ++ nfsd_file_put(nf); + open_retry = false; + fh_put(fhp); + goto retry; +diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c +index 7a806ac13e317e..8cca1329f3485c 100644 +--- a/fs/nfsd/nfs4idmap.c ++++ b/fs/nfsd/nfs4idmap.c +@@ -581,6 +581,7 @@ static __be32 idmap_id_to_name(struct xdr_stream *xdr, + .id = id, + .type = type, + }; ++ __be32 status = nfs_ok; + __be32 *p; + int ret; + struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); +@@ -593,12 +594,16 @@ static __be32 idmap_id_to_name(struct xdr_stream *xdr, + return nfserrno(ret); + ret = strlen(item->name); + WARN_ON_ONCE(ret > IDMAP_NAMESZ); ++ + p = xdr_reserve_space(xdr, ret + 4); +- if (!p) +- return nfserr_resource; +- p = xdr_encode_opaque(p, item->name, ret); ++ if (unlikely(!p)) { ++ status = nfserr_resource; ++ goto out_put; ++ } ++ xdr_encode_opaque(p, item->name, ret); ++out_put: + cache_put(&item->h, nn->idtoname_cache); +- return 0; ++ return status; + } + + static bool +diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c +index 3509e73abe1f4b..4395577825a7fa 100644 +--- a/fs/nfsd/nfs4recover.c ++++ b/fs/nfsd/nfs4recover.c +@@ -806,6 +806,10 @@ __cld_pipe_inprogress_downcall(const struct cld_msg_v2 __user *cmsg, + ci = &cmsg->cm_u.cm_clntinfo; + if (get_user(namelen, &ci->cc_name.cn_len)) + return -EFAULT; ++ if (!namelen) { ++ dprintk("%s: namelen should not be zero", __func__); ++ return -EINVAL; ++ } + name.data = memdup_user(&ci->cc_name.cn_id, namelen); + if (IS_ERR(name.data)) + return PTR_ERR(name.data); +@@ -828,6 +832,10 @@ __cld_pipe_inprogress_downcall(const struct cld_msg_v2 __user *cmsg, + cnm = &cmsg->cm_u.cm_name; + if (get_user(namelen, &cnm->cn_len)) + return -EFAULT; ++ if (!namelen) { ++ dprintk("%s: namelen should not be zero", __func__); ++ return -EINVAL; ++ } + name.data = memdup_user(&cnm->cn_id, namelen); + if (IS_ERR(name.data)) + return PTR_ERR(name.data); +diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c +index 598f0586705957..dbd27a44632fa9 100644 +--- a/fs/nilfs2/btree.c ++++ b/fs/nilfs2/btree.c +@@ -350,7 +350,7 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node, + if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN || + level >= NILFS_BTREE_LEVEL_MAX || + (flags & NILFS_BTREE_NODE_ROOT) || +- nchildren < 0 || ++ nchildren <= 0 || + nchildren > NILFS_BTREE_NODE_NCHILDREN_MAX(size))) { + nilfs_crit(inode->i_sb, + "bad btree node (ino=%lu, blocknr=%llu): level = %d, flags = 0x%x, nchildren = %d", +@@ -381,7 +381,8 @@ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node, + if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN || + level >= NILFS_BTREE_LEVEL_MAX || + nchildren < 0 || +- nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) { ++ nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX || ++ (nchildren == 0 && level > NILFS_BTREE_LEVEL_NODE_MIN))) { + nilfs_crit(inode->i_sb, + "bad btree root (ino=%lu): level = %d, flags = 0x%x, nchildren = %d", + 
inode->i_ino, level, flags, nchildren); +@@ -1658,13 +1659,16 @@ static int nilfs_btree_check_delete(struct nilfs_bmap *btree, __u64 key) + int nchildren, ret; + + root = nilfs_btree_get_root(btree); ++ nchildren = nilfs_btree_node_get_nchildren(root); ++ if (unlikely(nchildren == 0)) ++ return 0; ++ + switch (nilfs_btree_height(btree)) { + case 2: + bh = NULL; + node = root; + break; + case 3: +- nchildren = nilfs_btree_node_get_nchildren(root); + if (nchildren > 1) + return 0; + ptr = nilfs_btree_node_get_ptr(root, nchildren - 1, +@@ -1673,12 +1677,12 @@ static int nilfs_btree_check_delete(struct nilfs_bmap *btree, __u64 key) + if (ret < 0) + return ret; + node = (struct nilfs_btree_node *)bh->b_data; ++ nchildren = nilfs_btree_node_get_nchildren(node); + break; + default: + return 0; + } + +- nchildren = nilfs_btree_node_get_nchildren(node); + maxkey = nilfs_btree_node_get_key(node, nchildren - 1); + nextmaxkey = (nchildren > 1) ? + nilfs_btree_node_get_key(node, nchildren - 2) : 0; +diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c +index dceb4bc76a66ac..2c548e8efef060 100644 +--- a/fs/smb/server/vfs.c ++++ b/fs/smb/server/vfs.c +@@ -496,7 +496,7 @@ int ksmbd_vfs_write(struct ksmbd_work *work, struct ksmbd_file *fp, + int err = 0; + + if (work->conn->connection_type) { +- if (!(fp->daccess & FILE_WRITE_DATA_LE)) { ++ if (!(fp->daccess & (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE))) { + pr_err("no right to write(%pD)\n", fp->filp); + err = -EACCES; + goto out; +@@ -1110,9 +1110,10 @@ static bool __dir_empty(struct dir_context *ctx, const char *name, int namlen, + struct ksmbd_readdir_data *buf; + + buf = container_of(ctx, struct ksmbd_readdir_data, ctx); +- buf->dirent_count++; ++ if (!is_dot_dotdot(name, namlen)) ++ buf->dirent_count++; + +- return buf->dirent_count <= 2; ++ return !buf->dirent_count; + } + + /** +@@ -1132,7 +1133,7 @@ int ksmbd_vfs_empty_dir(struct ksmbd_file *fp) + readdir_data.dirent_count = 0; + + err = iterate_dir(fp->filp, &readdir_data.ctx); +- if (readdir_data.dirent_count > 2) ++ if (readdir_data.dirent_count) + err = -ENOTEMPTY; + else + err = 0; +@@ -1161,7 +1162,7 @@ static bool __caseless_lookup(struct dir_context *ctx, const char *name, + if (cmp < 0) + cmp = strncasecmp((char *)buf->private, name, namlen); + if (!cmp) { +- memcpy((char *)buf->private, name, namlen); ++ memcpy((char *)buf->private, name, buf->used); + buf->dirent_count = 1; + return false; + } +@@ -1229,10 +1230,7 @@ int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name, + char *filepath; + size_t path_len, remain_len; + +- filepath = kstrdup(name, GFP_KERNEL); +- if (!filepath) +- return -ENOMEM; +- ++ filepath = name; + path_len = strlen(filepath); + remain_len = path_len; + +@@ -1275,10 +1273,9 @@ int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name, + err = -EINVAL; + out2: + path_put(parent_path); +-out1: +- kfree(filepath); + } + ++out1: + if (!err) { + err = mnt_want_write(parent_path->mnt); + if (err) { +diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h +index c0b69ffe7bdb4b..ec425d2834f869 100644 +--- a/include/acpi/cppc_acpi.h ++++ b/include/acpi/cppc_acpi.h +@@ -64,6 +64,8 @@ struct cpc_desc { + int cpu_id; + int write_cmd_status; + int write_cmd_id; ++ /* Lock used for RMW operations in cpc_write() */ ++ spinlock_t rmw_lock; + struct cpc_register_resource cpc_regs[MAX_CPC_REG_ENT]; + struct acpi_psd_package domain_info; + struct kobject kobj; +diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h +index 
3c8d2b87a9edce..729ec2453149f9 100644 +--- a/include/linux/bitmap.h ++++ b/include/linux/bitmap.h +@@ -77,6 +77,10 @@ struct device; + * bitmap_to_arr64(buf, src, nbits) Copy nbits from buf to u64[] dst + * bitmap_get_value8(map, start) Get 8bit value from map at start + * bitmap_set_value8(map, value, start) Set 8bit value to map at start ++ * bitmap_read(map, start, nbits) Read an nbits-sized value from ++ * map at start ++ * bitmap_write(map, value, start, nbits) Write an nbits-sized value to ++ * map at start + * + * Note, bitmap_zero() and bitmap_fill() operate over the region of + * unsigned longs, that is, bits behind bitmap till the unsigned long +@@ -613,6 +617,79 @@ static inline void bitmap_set_value8(unsigned long *map, unsigned long value, + map[index] |= value << offset; + } + ++/** ++ * bitmap_read - read a value of n-bits from the memory region ++ * @map: address to the bitmap memory region ++ * @start: bit offset of the n-bit value ++ * @nbits: size of value in bits, nonzero, up to BITS_PER_LONG ++ * ++ * Returns: value of @nbits bits located at the @start bit offset within the ++ * @map memory region. For @nbits = 0 and @nbits > BITS_PER_LONG the return ++ * value is undefined. ++ */ ++static inline unsigned long bitmap_read(const unsigned long *map, ++ unsigned long start, ++ unsigned long nbits) ++{ ++ size_t index = BIT_WORD(start); ++ unsigned long offset = start % BITS_PER_LONG; ++ unsigned long space = BITS_PER_LONG - offset; ++ unsigned long value_low, value_high; ++ ++ if (unlikely(!nbits || nbits > BITS_PER_LONG)) ++ return 0; ++ ++ if (space >= nbits) ++ return (map[index] >> offset) & BITMAP_LAST_WORD_MASK(nbits); ++ ++ value_low = map[index] & BITMAP_FIRST_WORD_MASK(start); ++ value_high = map[index + 1] & BITMAP_LAST_WORD_MASK(start + nbits); ++ return (value_low >> offset) | (value_high << space); ++} ++ ++/** ++ * bitmap_write - write n-bit value within a memory region ++ * @map: address to the bitmap memory region ++ * @value: value to write, clamped to nbits ++ * @start: bit offset of the n-bit value ++ * @nbits: size of value in bits, nonzero, up to BITS_PER_LONG. ++ * ++ * bitmap_write() behaves as-if implemented as @nbits calls of __assign_bit(), ++ * i.e. bits beyond @nbits are ignored: ++ * ++ * for (bit = 0; bit < nbits; bit++) ++ * __assign_bit(start + bit, bitmap, val & BIT(bit)); ++ * ++ * For @nbits == 0 and @nbits > BITS_PER_LONG no writes are performed. ++ */ ++static inline void bitmap_write(unsigned long *map, unsigned long value, ++ unsigned long start, unsigned long nbits) ++{ ++ size_t index; ++ unsigned long offset; ++ unsigned long space; ++ unsigned long mask; ++ bool fit; ++ ++ if (unlikely(!nbits || nbits > BITS_PER_LONG)) ++ return; ++ ++ mask = BITMAP_LAST_WORD_MASK(nbits); ++ value &= mask; ++ offset = start % BITS_PER_LONG; ++ space = BITS_PER_LONG - offset; ++ fit = space >= nbits; ++ index = BIT_WORD(start); ++ ++ map[index] &= (fit ? 
(~(mask << offset)) : ~BITMAP_FIRST_WORD_MASK(start));
++ map[index] |= value << offset;
++ if (fit)
++ return;
++
++ map[index + 1] &= BITMAP_FIRST_WORD_MASK(start + nbits);
++ map[index + 1] |= (value >> space);
++}
++
+ #endif /* __ASSEMBLY__ */
+
+ #endif /* __LINUX_BITMAP_H */
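The bitmap_read()/bitmap_write() helpers added above operate on an n-bit field that may straddle two adjacent unsigned longs. A small userspace model of the read path, not from the kernel tree, assuming BITS_PER_LONG == 64 and using arbitrary demo values, shows how the low and high halves are recombined:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LONG 64UL
/* same mask the kernel macro BITMAP_LAST_WORD_MASK() produces */
#define LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))

static unsigned long bitmap_read(const unsigned long *map,
                                 unsigned long start, unsigned long nbits)
{
        unsigned long index = start / BITS_PER_LONG;
        unsigned long offset = start % BITS_PER_LONG;
        unsigned long space = BITS_PER_LONG - offset;

        if (!nbits || nbits > BITS_PER_LONG)
                return 0;
        if (space >= nbits)             /* field fits in one word */
                return (map[index] >> offset) & LAST_WORD_MASK(nbits);
        /* low part from map[index], high part from map[index + 1] */
        return (map[index] >> offset) |
               ((map[index + 1] & LAST_WORD_MASK(start + nbits)) << space);
}

int main(void)
{
        unsigned long map[2] = { 0, 0 };

        /* Place the 12-bit value 0xabc at bit offset 58: the low 6 bits
         * land in map[0], the high 6 bits in map[1]. */
        map[0] |= 0xabcUL << 58;
        map[1] |= 0xabcUL >> 6;

        printf("0x%lx\n", bitmap_read(map, 58, 12)); /* prints 0xabc */
        return 0;
}

bitmap_write() is the mirror image: it clears the field with the same pair of masks, over one or two words depending on whether the value fits, and then ORs the new value in.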
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index e4cd28c38b825e..722102518dc951 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -675,6 +675,11 @@ enum bpf_type_flag {
+ /* DYNPTR points to xdp_buff */
+ DYNPTR_TYPE_XDP = BIT(16 + BPF_BASE_TYPE_BITS),
+
++ /* Memory must be aligned on some architectures, used in combination with
++ * MEM_FIXED_SIZE.
++ */
++ MEM_ALIGNED = BIT(17 + BPF_BASE_TYPE_BITS),
++
+ __BPF_TYPE_FLAG_MAX,
+ __BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1,
+ };
+@@ -711,8 +716,6 @@ enum bpf_arg_type {
+ ARG_ANYTHING, /* any (initialized) argument is ok */
+ ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */
+ ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */
+- ARG_PTR_TO_INT, /* pointer to int */
+- ARG_PTR_TO_LONG, /* pointer to long */
+ ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */
+ ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */
+ ARG_PTR_TO_RINGBUF_MEM, /* pointer to dynamically reserved ringbuf memory */
+diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
+index 1352a24d72ef41..b9affa64b7fa22 100644
+--- a/include/linux/f2fs_fs.h
++++ b/include/linux/f2fs_fs.h
+@@ -262,7 +262,7 @@ struct f2fs_extent {
+ #define F2FS_INLINE_DATA 0x02 /* file inline data flag */
+ #define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */
+ #define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */
+-#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */
++#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries (obsolete) */
+ #define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */
+ #define F2FS_PIN_FILE 0x40 /* file should not be gced */
+ #define F2FS_COMPRESS_RELEASED 0x80 /* file released compressed blocks */
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 2a554e4045a9ac..6c3d86532e3f91 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2844,6 +2844,17 @@ extern bool path_is_under(const struct path *, const struct path *);
+
+ extern char *file_path(struct file *, char *, int);
+
++/**
++ * is_dot_dotdot - returns true only if @name is "." or ".."
++ * @name: file name to check
++ * @len: length of file name, in bytes
++ */
++static inline bool is_dot_dotdot(const char *name, size_t len)
++{
++ return len && unlikely(name[0] == '.') &&
++ (len == 1 || (len == 2 && name[1] == '.'));
++}
++
+ #include
+
+ /* needed for stackable file system support */
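The is_dot_dotdot() helper above becomes the single shared check used by f2fs, namei.c and ksmbd elsewhere in this patch. A self-contained sketch of its behaviour, with unlikely() dropped since it is only a branch-prediction hint:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool is_dot_dotdot(const char *name, size_t len)
{
        return len && name[0] == '.' &&
               (len == 1 || (len == 2 && name[1] == '.'));
}

int main(void)
{
        printf("%d %d %d %d\n",
               is_dot_dotdot(".", 1),       /* 1 */
               is_dot_dotdot("..", 2),      /* 1 */
               is_dot_dotdot(".hidden", 7), /* 0 */
               is_dot_dotdot("...", 3));    /* 0 */
        return 0;
}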
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 830b925c2d0054..b6a4d6471b4a72 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1732,8 +1732,8 @@ static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
+ unsigned int pid_bit;
+
+ pid_bit = hash_32(current->pid, ilog2(BITS_PER_LONG));
+- if (vma->numab_state && !test_bit(pid_bit, &vma->numab_state->access_pids[1])) {
+- __set_bit(pid_bit, &vma->numab_state->access_pids[1]);
++ if (vma->numab_state && !test_bit(pid_bit, &vma->numab_state->pids_active[1])) {
++ __set_bit(pid_bit, &vma->numab_state->pids_active[1]);
+ }
+ }
+ #else /* !CONFIG_NUMA_BALANCING */
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index ba25777ec0a716..43c19d85dfe7fe 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -551,9 +551,36 @@ struct vma_lock {
+ };
+
+ struct vma_numab_state {
++ /*
++ * Initialised as time in 'jiffies' after which VMA
++ * should be scanned. Delays first scan of new VMA by at
++ * least sysctl_numa_balancing_scan_delay:
++ */
+ unsigned long next_scan;
+- unsigned long next_pid_reset;
+- unsigned long access_pids[2];
++
++ /*
++ * Time in jiffies when pids_active[] is reset to
++ * detect phase change behaviour:
++ */
++ unsigned long pids_active_reset;
++
++ /*
++ * Approximate tracking of PIDs that trapped a NUMA hinting
++ * fault. May produce false positives due to hash collisions.
++ *
++ * [0] Previous PID tracking
++ * [1] Current PID tracking
++ *
++ * Window moves after next_pid_reset has expired approximately
++ * every VMA_PID_RESET_PERIOD jiffies:
++ */
++ unsigned long pids_active[2];
++
++ /*
++ * MM scan sequence ID when the VMA was last completely scanned.
++ * A VMA is not eligible for scanning if prev_scan_seq == numa_scan_seq
++ */
++ int prev_scan_seq;
+ };
+
+ /*
+diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
+index c09cdcc99471e1..189140bf11fc40 100644
+--- a/include/linux/sbitmap.h
++++ b/include/linux/sbitmap.h
+@@ -40,7 +40,7 @@ struct sbitmap_word {
+ /**
+ * @swap_lock: serializes simultaneous updates of ->word and ->cleared
+ */
+- spinlock_t swap_lock;
++ raw_spinlock_t swap_lock;
+ } ____cacheline_aligned_in_smp;
+
+ /**
+diff --git a/include/linux/sched/numa_balancing.h b/include/linux/sched/numa_balancing.h
+index 3988762efe15c0..b69afb8630db4a 100644
+--- a/include/linux/sched/numa_balancing.h
++++ b/include/linux/sched/numa_balancing.h
+@@ -15,6 +15,16 @@
+ #define TNF_FAULT_LOCAL 0x08
+ #define TNF_MIGRATE_FAIL 0x10
+
++enum numa_vmaskip_reason {
++ NUMAB_SKIP_UNSUITABLE,
++ NUMAB_SKIP_SHARED_RO,
++ NUMAB_SKIP_INACCESSIBLE,
++ NUMAB_SKIP_SCAN_DELAY,
++ NUMAB_SKIP_PID_INACTIVE,
++ NUMAB_SKIP_IGNORE_PID,
++ NUMAB_SKIP_SEQ_COMPLETED,
++};
++
+ #ifdef CONFIG_NUMA_BALANCING
+ extern void task_numa_fault(int last_node, int node, int pages, int flags);
+ extern pid_t task_numa_group_id(struct task_struct *p);
+diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
+index 9f08a584d70785..0b9f1e598e3a6b 100644
+--- a/include/linux/usb/usbnet.h
++++ b/include/linux/usb/usbnet.h
+@@ -76,8 +76,23 @@ struct usbnet {
+ # define EVENT_LINK_CHANGE 11
+ # define EVENT_SET_RX_MODE 12
+ # define EVENT_NO_IP_ALIGN 13
++/* This one is special, as it indicates that the device is going away
++ * there are cyclic dependencies between tasklet, timer and bh
++ * that must be broken
++ */
++# define EVENT_UNPLUG 31
+ };
+
++static inline bool usbnet_going_away(struct usbnet *ubn)
++{
++ return test_bit(EVENT_UNPLUG, &ubn->flags);
++}
++
++static inline void usbnet_mark_going_away(struct usbnet *ubn)
++{
++ set_bit(EVENT_UNPLUG, &ubn->flags);
++}
++
+ static inline struct usb_driver *driver_of(struct usb_interface *intf)
+ {
+ return to_usb_driver(intf->dev.driver);
+ }
+diff --git a/include/linux/xarray.h b/include/linux/xarray.h
+index cb571dfcf4b167..d9d479334c9e65 100644
+--- a/include/linux/xarray.h
++++ b/include/linux/xarray.h
+@@ -1548,6 +1548,7 @@ void xas_create_range(struct xa_state *);
+
+ #ifdef CONFIG_XARRAY_MULTI
+ int xa_get_order(struct xarray *, unsigned long index);
++int xas_get_order(struct xa_state *xas);
+ void xas_split(struct xa_state *, void *entry, unsigned int order);
+ void xas_split_alloc(struct xa_state *, void *entry, unsigned int order, gfp_t);
+ #else
+@@ -1556,6 +1557,11 @@ static inline int xa_get_order(struct xarray *xa, unsigned long index)
+ return 0;
+ }
+
++static inline int xas_get_order(struct xa_state *xas)
++{
++ return 0;
++}
++
+ static inline void xas_split(struct xa_state *xas, void *entry,
+ unsigned int order)
+ {
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index 29f1549ee11145..0f50c0cefcb7dc 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -2224,8 +2224,8 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ bool mgmt_connected);
+ void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status);
+-void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+- u8 addr_type, u8 status);
++void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn,
++ u8 status);
+ void mgmt_pin_code_request(struct
hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
+ void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status);
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 6f1ff4846451bf..7db5912e0c5f63 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -786,6 +786,8 @@ static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
+ }
+
+ bool icmp_global_allow(void);
++void icmp_global_consume(void);
++
+ extern int sysctl_icmp_msgs_per_sec;
+ extern int sysctl_icmp_msgs_burst;
+
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h
+index a39bd4169f2926..47ade676565dbc 100644
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -936,8 +936,9 @@ enum mac80211_tx_info_flags {
+ * of their QoS TID or other priority field values.
+ * @IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX: first MLO TX, used mostly internally
+ * for sequence number assignment
+- * @IEEE80211_TX_CTRL_SCAN_TX: Indicates that this frame is transmitted
+- * due to scanning, not in normal operation on the interface.
++ * @IEEE80211_TX_CTRL_DONT_USE_RATE_MASK: Don't use rate mask for this frame
++ * which is transmitted due to scanning or offchannel TX, not in normal
++ * operation on the interface.
+ * @IEEE80211_TX_CTRL_MLO_LINK: If not @IEEE80211_LINK_UNSPECIFIED, this
+ * frame should be transmitted on the specific link. This really is
+ * only relevant for frames that do not have data present, and is
+@@ -958,7 +959,7 @@ enum mac80211_tx_control_flags {
+ IEEE80211_TX_CTRL_NO_SEQNO = BIT(7),
+ IEEE80211_TX_CTRL_DONT_REORDER = BIT(8),
+ IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX = BIT(9),
+- IEEE80211_TX_CTRL_SCAN_TX = BIT(10),
++ IEEE80211_TX_CTRL_DONT_USE_RATE_MASK = BIT(10),
+ IEEE80211_TX_CTRL_MLO_LINK = 0xf0000000,
+ };
+
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index c206ffaa8ed703..b3917af309e0f1 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -2230,9 +2230,26 @@ static inline s64 tcp_rto_delta_us(const struct sock *sk)
+ {
+ const struct sk_buff *skb = tcp_rtx_queue_head(sk);
+ u32 rto = inet_csk(sk)->icsk_rto;
+- u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
+
+- return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
++ if (likely(skb)) {
++ u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
++
++ return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
++ } else {
++ WARN_ONCE(1,
++ "rtx queue empty: "
++ "out:%u sacked:%u lost:%u retrans:%u "
++ "tlp_high_seq:%u sk_state:%u ca_state:%u "
++ "advmss:%u mss_cache:%u pmtu:%u\n",
++ tcp_sk(sk)->packets_out, tcp_sk(sk)->sacked_out,
++ tcp_sk(sk)->lost_out, tcp_sk(sk)->retrans_out,
++ tcp_sk(sk)->tlp_high_seq, sk->sk_state,
++ inet_csk(sk)->icsk_ca_state,
++ tcp_sk(sk)->advmss, tcp_sk(sk)->mss_cache,
++ inet_csk(sk)->icsk_pmtu_cookie);
++ return jiffies_to_usecs(rto);
++ }
++
+ }
+
+ /*
+diff --git a/include/sound/tas2781.h b/include/sound/tas2781.h
+index be58d870505a4b..f97f386e5a55a3 100644
+--- a/include/sound/tas2781.h
++++ b/include/sound/tas2781.h
+@@ -78,11 +78,6 @@ struct tasdevice {
+ bool is_loaderr;
+ };
+
+-struct tasdevice_irqinfo {
+- int irq_gpio;
+- int irq;
+-};
+-
+ struct calidata {
+ unsigned char *data;
+ unsigned long total_sz;
+
+@@ -90,7 +85,6 @@
+
+ struct tasdevice_priv {
+ struct tasdevice tasdevice[TASDEVICE_MAX_CHANNELS];
+- struct tasdevice_irqinfo irq_info;
+ struct tasdevice_rca rcabin;
+ struct calidata cali_data;
+ struct tasdevice_fw *fmw;
+@@ -101,7 +95,6 @@ struct tasdevice_priv {
+ struct tm tm;
+
enum device_catlog_id catlog_id; +- const char *acpi_subsystem_id; + unsigned char cal_binaryname[TASDEVICE_MAX_CHANNELS][64]; + unsigned char crc8_lkp_tbl[CRC8_TABLE_SIZE]; + unsigned char coef_binaryname[64]; +@@ -112,6 +105,7 @@ struct tasdevice_priv { + unsigned int chip_id; + unsigned int sysclk; + ++ int irq; + int cur_prog; + int cur_conf; + int fw_state; +diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h +index 793f82cc1515a8..b6ffae01a8cd86 100644 +--- a/include/trace/events/f2fs.h ++++ b/include/trace/events/f2fs.h +@@ -139,7 +139,8 @@ TRACE_DEFINE_ENUM(EX_BLOCK_AGE); + { CP_NODE_NEED_CP, "node needs cp" }, \ + { CP_FASTBOOT_MODE, "fastboot mode" }, \ + { CP_SPEC_LOG_NUM, "log type is 2" }, \ +- { CP_RECOVER_DIR, "dir needs recovery" }) ++ { CP_RECOVER_DIR, "dir needs recovery" }, \ ++ { CP_XATTR_DIR, "dir's xattr updated" }) + + #define show_shutdown_mode(type) \ + __print_symbolic(type, \ +diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h +index fbb99a61f714cb..010ba1b7cb0eac 100644 +--- a/include/trace/events/sched.h ++++ b/include/trace/events/sched.h +@@ -664,6 +664,58 @@ DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa, + TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu) + ); + ++#ifdef CONFIG_NUMA_BALANCING ++#define NUMAB_SKIP_REASON \ ++ EM( NUMAB_SKIP_UNSUITABLE, "unsuitable" ) \ ++ EM( NUMAB_SKIP_SHARED_RO, "shared_ro" ) \ ++ EM( NUMAB_SKIP_INACCESSIBLE, "inaccessible" ) \ ++ EM( NUMAB_SKIP_SCAN_DELAY, "scan_delay" ) \ ++ EM( NUMAB_SKIP_PID_INACTIVE, "pid_inactive" ) \ ++ EM( NUMAB_SKIP_IGNORE_PID, "ignore_pid_inactive" ) \ ++ EMe(NUMAB_SKIP_SEQ_COMPLETED, "seq_completed" ) ++ ++/* Redefine for export. */ ++#undef EM ++#undef EMe ++#define EM(a, b) TRACE_DEFINE_ENUM(a); ++#define EMe(a, b) TRACE_DEFINE_ENUM(a); ++ ++NUMAB_SKIP_REASON ++ ++/* Redefine for symbolic printing. */ ++#undef EM ++#undef EMe ++#define EM(a, b) { a, b }, ++#define EMe(a, b) { a, b } ++ ++TRACE_EVENT(sched_skip_vma_numa, ++ ++ TP_PROTO(struct mm_struct *mm, struct vm_area_struct *vma, ++ enum numa_vmaskip_reason reason), ++ ++ TP_ARGS(mm, vma, reason), ++ ++ TP_STRUCT__entry( ++ __field(unsigned long, numa_scan_offset) ++ __field(unsigned long, vm_start) ++ __field(unsigned long, vm_end) ++ __field(enum numa_vmaskip_reason, reason) ++ ), ++ ++ TP_fast_assign( ++ __entry->numa_scan_offset = mm->numa_scan_offset; ++ __entry->vm_start = vma->vm_start; ++ __entry->vm_end = vma->vm_end; ++ __entry->reason = reason; ++ ), ++ ++ TP_printk("numa_scan_offset=%lX vm_start=%lX vm_end=%lX reason=%s", ++ __entry->numa_scan_offset, ++ __entry->vm_start, ++ __entry->vm_end, ++ __print_symbolic(__entry->reason, NUMAB_SKIP_REASON)) ++); ++#endif /* CONFIG_NUMA_BALANCING */ + + /* + * Tracepoint for waking a polling cpu without an IPI. 
+diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c +index 98c9cfb983062b..a1e31723c9ed69 100644 +--- a/io_uring/io-wq.c ++++ b/io_uring/io-wq.c +@@ -13,6 +13,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -1169,7 +1170,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data) + + if (!alloc_cpumask_var(&wq->cpu_mask, GFP_KERNEL)) + goto err; +- cpumask_copy(wq->cpu_mask, cpu_possible_mask); ++ cpuset_cpus_allowed(data->task, wq->cpu_mask); + wq->acct[IO_WQ_ACCT_BOUND].max_workers = bounded; + wq->acct[IO_WQ_ACCT_UNBOUND].max_workers = + task_rlimit(current, RLIMIT_NPROC); +@@ -1324,17 +1325,29 @@ static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node) + + int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask) + { ++ cpumask_var_t allowed_mask; ++ int ret = 0; ++ + if (!tctx || !tctx->io_wq) + return -EINVAL; + ++ if (!alloc_cpumask_var(&allowed_mask, GFP_KERNEL)) ++ return -ENOMEM; ++ + rcu_read_lock(); +- if (mask) +- cpumask_copy(tctx->io_wq->cpu_mask, mask); +- else +- cpumask_copy(tctx->io_wq->cpu_mask, cpu_possible_mask); ++ cpuset_cpus_allowed(tctx->io_wq->task, allowed_mask); ++ if (mask) { ++ if (cpumask_subset(mask, allowed_mask)) ++ cpumask_copy(tctx->io_wq->cpu_mask, mask); ++ else ++ ret = -EINVAL; ++ } else { ++ cpumask_copy(tctx->io_wq->cpu_mask, allowed_mask); ++ } + rcu_read_unlock(); + +- return 0; ++ free_cpumask_var(allowed_mask); ++ return ret; + } + + /* +diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c +index 68504709f75cb4..7b0a100e1139ae 100644 +--- a/io_uring/io_uring.c ++++ b/io_uring/io_uring.c +@@ -2514,7 +2514,7 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx, + return 1; + if (unlikely(!llist_empty(&ctx->work_llist))) + return 1; +- if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL))) ++ if (unlikely(task_work_pending(current))) + return 1; + if (unlikely(task_sigpending(current))) + return -EINTR; +@@ -2610,9 +2610,9 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, + * If we got woken because of task_work being processed, run it + * now rather than let the caller do another wait loop. 
+ */ +- io_run_task_work(); + if (!llist_empty(&ctx->work_llist)) + io_run_local_work(ctx, nr_wait); ++ io_run_task_work(); + + /* + * Non-local task_work will be run on exit to userspace, but +diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c +index 350436e55aafeb..cdf8b567cb9443 100644 +--- a/io_uring/sqpoll.c ++++ b/io_uring/sqpoll.c +@@ -10,6 +10,7 @@ + #include + #include + #include ++#include + #include + + #include +@@ -401,11 +402,22 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx, + return 0; + + if (p->flags & IORING_SETUP_SQ_AFF) { ++ cpumask_var_t allowed_mask; + int cpu = p->sq_thread_cpu; + + ret = -EINVAL; + if (cpu >= nr_cpu_ids || !cpu_online(cpu)) + goto err_sqpoll; ++ ret = -ENOMEM; ++ if (!alloc_cpumask_var(&allowed_mask, GFP_KERNEL)) ++ goto err_sqpoll; ++ ret = -EINVAL; ++ cpuset_cpus_allowed(current, allowed_mask); ++ if (!cpumask_test_cpu(cpu, allowed_mask)) { ++ free_cpumask_var(allowed_mask); ++ goto err_sqpoll; ++ } ++ free_cpumask_var(allowed_mask); + sqd->sq_cpu = cpu; + } else { + sqd->sq_cpu = -1; +diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c +index fbf9721ba21b6a..e0e4d4f490e87c 100644 +--- a/kernel/bpf/btf.c ++++ b/kernel/bpf/btf.c +@@ -8421,6 +8421,7 @@ int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo, + struct bpf_core_cand_list cands = {}; + struct bpf_core_relo_res targ_res; + struct bpf_core_spec *specs; ++ const struct btf_type *type; + int err; + + /* ~4k of temp memory necessary to convert LLVM spec like "0:1:0:5" +@@ -8430,6 +8431,13 @@ int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo, + if (!specs) + return -ENOMEM; + ++ type = btf_type_by_id(ctx->btf, relo->type_id); ++ if (!type) { ++ bpf_log(ctx->log, "relo #%u: bad type id %u\n", ++ relo_idx, relo->type_id); ++ return -EINVAL; ++ } ++ + if (need_cands) { + struct bpf_cand_cache *cc; + int i; +diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c +index 9ab6be9653059e..3dba5bb294d8e4 100644 +--- a/kernel/bpf/helpers.c ++++ b/kernel/bpf/helpers.c +@@ -516,11 +516,12 @@ static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags, + } + + BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags, +- long *, res) ++ s64 *, res) + { + long long _res; + int err; + ++ *res = 0; + err = __bpf_strtoll(buf, buf_len, flags, &_res); + if (err < 0) + return err; +@@ -537,16 +538,18 @@ const struct bpf_func_proto bpf_strtol_proto = { + .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg2_type = ARG_CONST_SIZE, + .arg3_type = ARG_ANYTHING, +- .arg4_type = ARG_PTR_TO_LONG, ++ .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED, ++ .arg4_size = sizeof(s64), + }; + + BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags, +- unsigned long *, res) ++ u64 *, res) + { + unsigned long long _res; + bool is_negative; + int err; + ++ *res = 0; + err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative); + if (err < 0) + return err; +@@ -565,7 +568,8 @@ const struct bpf_func_proto bpf_strtoul_proto = { + .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg2_type = ARG_CONST_SIZE, + .arg3_type = ARG_ANYTHING, +- .arg4_type = ARG_PTR_TO_LONG, ++ .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED, ++ .arg4_size = sizeof(u64), + }; + + BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2) +diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c +index 65df92f5b19223..b1933d074f0519 100644 +--- a/kernel/bpf/syscall.c ++++ b/kernel/bpf/syscall.c +@@ -5647,6 +5647,7 @@ 
static const struct bpf_func_proto bpf_sys_close_proto = { + + BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res) + { ++ *res = 0; + if (flags) + return -EINVAL; + +@@ -5667,7 +5668,8 @@ static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = { + .arg1_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_CONST_SIZE_OR_ZERO, + .arg3_type = ARG_ANYTHING, +- .arg4_type = ARG_PTR_TO_LONG, ++ .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED, ++ .arg4_size = sizeof(u64), + }; + + static const struct bpf_func_proto * +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c +index 9d5699942273ee..834394faf2af30 100644 +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -7987,6 +7987,12 @@ static bool arg_type_is_mem_size(enum bpf_arg_type type) + type == ARG_CONST_SIZE_OR_ZERO; + } + ++static bool arg_type_is_raw_mem(enum bpf_arg_type type) ++{ ++ return base_type(type) == ARG_PTR_TO_MEM && ++ type & MEM_UNINIT; ++} ++ + static bool arg_type_is_release(enum bpf_arg_type type) + { + return type & OBJ_RELEASE; +@@ -7997,16 +8003,6 @@ static bool arg_type_is_dynptr(enum bpf_arg_type type) + return base_type(type) == ARG_PTR_TO_DYNPTR; + } + +-static int int_ptr_type_to_size(enum bpf_arg_type type) +-{ +- if (type == ARG_PTR_TO_INT) +- return sizeof(u32); +- else if (type == ARG_PTR_TO_LONG) +- return sizeof(u64); +- +- return -EINVAL; +-} +- + static int resolve_map_arg_type(struct bpf_verifier_env *env, + const struct bpf_call_arg_meta *meta, + enum bpf_arg_type *arg_type) +@@ -8079,16 +8075,6 @@ static const struct bpf_reg_types mem_types = { + }, + }; + +-static const struct bpf_reg_types int_ptr_types = { +- .types = { +- PTR_TO_STACK, +- PTR_TO_PACKET, +- PTR_TO_PACKET_META, +- PTR_TO_MAP_KEY, +- PTR_TO_MAP_VALUE, +- }, +-}; +- + static const struct bpf_reg_types spin_lock_types = { + .types = { + PTR_TO_MAP_VALUE, +@@ -8143,8 +8129,6 @@ static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = { + [ARG_PTR_TO_SPIN_LOCK] = &spin_lock_types, + [ARG_PTR_TO_MEM] = &mem_types, + [ARG_PTR_TO_RINGBUF_MEM] = &ringbuf_mem_types, +- [ARG_PTR_TO_INT] = &int_ptr_types, +- [ARG_PTR_TO_LONG] = &int_ptr_types, + [ARG_PTR_TO_PERCPU_BTF_ID] = &percpu_btf_ptr_types, + [ARG_PTR_TO_FUNC] = &func_ptr_types, + [ARG_PTR_TO_STACK] = &stack_ptr_types, +@@ -8651,9 +8635,11 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg, + */ + meta->raw_mode = arg_type & MEM_UNINIT; + if (arg_type & MEM_FIXED_SIZE) { +- err = check_helper_mem_access(env, regno, +- fn->arg_size[arg], false, +- meta); ++ err = check_helper_mem_access(env, regno, fn->arg_size[arg], false, meta); ++ if (err) ++ return err; ++ if (arg_type & MEM_ALIGNED) ++ err = check_ptr_alignment(env, reg, 0, fn->arg_size[arg], true); + } + break; + case ARG_CONST_SIZE: +@@ -8678,17 +8664,6 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg, + if (err) + return err; + break; +- case ARG_PTR_TO_INT: +- case ARG_PTR_TO_LONG: +- { +- int size = int_ptr_type_to_size(arg_type); +- +- err = check_helper_mem_access(env, regno, size, false, meta); +- if (err) +- return err; +- err = check_ptr_alignment(env, reg, 0, size, true); +- break; +- } + case ARG_PTR_TO_CONST_STR: + { + struct bpf_map *map = reg->map_ptr; +@@ -9040,15 +9015,15 @@ static bool check_raw_mode_ok(const struct bpf_func_proto *fn) + { + int count = 0; + +- if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) ++ if (arg_type_is_raw_mem(fn->arg1_type)) + count++; +- if (fn->arg2_type == 
ARG_PTR_TO_UNINIT_MEM)
++ if (arg_type_is_raw_mem(fn->arg2_type))
+ count++;
+- if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
++ if (arg_type_is_raw_mem(fn->arg3_type))
+ count++;
+- if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
++ if (arg_type_is_raw_mem(fn->arg4_type))
+ count++;
+- if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
++ if (arg_type_is_raw_mem(fn->arg5_type))
+ count++;
+
+ /* We only support one arg being in raw mode at the moment,
+diff --git a/kernel/kthread.c b/kernel/kthread.c
+index 290cbc845225e4..ac03bc55e17caf 100644
+--- a/kernel/kthread.c
++++ b/kernel/kthread.c
+@@ -844,8 +844,16 @@ int kthread_worker_fn(void *worker_ptr)
+ * event only cares about the address.
+ */
+ trace_sched_kthread_work_execute_end(work, func);
+- } else if (!freezing(current))
++ } else if (!freezing(current)) {
+ schedule();
++ } else {
++ /*
++ * Handle the case where the current remains
++ * TASK_INTERRUPTIBLE. try_to_freeze() expects
++ * the current to be TASK_RUNNING.
++ */
++ __set_current_state(TASK_RUNNING);
++ }
+
+ try_to_freeze();
+ cond_resched();
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index 151bd3de59363a..3468d8230e5f75 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -6184,25 +6184,27 @@ static struct pending_free *get_pending_free(void)
+ static void free_zapped_rcu(struct rcu_head *cb);
+
+ /*
+- * Schedule an RCU callback if no RCU callback is pending. Must be called with
+- * the graph lock held.
+- */
+-static void call_rcu_zapped(struct pending_free *pf)
++ * See if we need to queue an RCU callback; must be called with
++ * the lockdep lock held. Returns false if either we don't have
++ * any pending free or the callback is already scheduled.
++ * Otherwise, a call_rcu() must follow this function call.
++ */
++static bool prepare_call_rcu_zapped(struct pending_free *pf)
+ {
+ WARN_ON_ONCE(inside_selftest());
+
+ if (list_empty(&pf->zapped))
+- return;
++ return false;
+
+ if (delayed_free.scheduled)
+- return;
++ return false;
+
+ delayed_free.scheduled = true;
+
+ WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
+ delayed_free.index ^= 1;
+
+- call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
++ return true;
+ }
+
+ /* The caller must hold the graph lock. May be called from RCU context. */
+@@ -6228,6 +6230,7 @@ static void free_zapped_rcu(struct rcu_head *ch)
+ {
+ struct pending_free *pf;
+ unsigned long flags;
++ bool need_callback;
+
+ if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
+ return;
+@@ -6239,14 +6242,18 @@ static void free_zapped_rcu(struct rcu_head *ch)
+ pf = delayed_free.pf + (delayed_free.index ^ 1);
+ __free_zapped_classes(pf);
+ delayed_free.scheduled = false;
++ need_callback =
++ prepare_call_rcu_zapped(delayed_free.pf + delayed_free.index);
++ lockdep_unlock();
++ raw_local_irq_restore(flags);
+
+ /*
+- * If there's anything on the open list, close and start a new callback.
+- */
+- call_rcu_zapped(delayed_free.pf + delayed_free.index);
++ * If there's a pending free and its callback has not been scheduled,
++ * queue an RCU callback.
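
Aside: the lockdep conversion above is the standard cure for a self-deadlock. The decision to schedule the RCU callback is made while the graph lock is held, but call_rcu() itself is only issued after the lock has been dropped. A compact pthread sketch of that prepare-under-lock, act-after-unlock shape (all names hypothetical):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t graph_lock = PTHREAD_MUTEX_INITIALIZER;
static bool zapped_pending, callback_scheduled;

static void fire_callback(void)
{
	/* stands in for call_rcu(&delayed_free.rcu_head, ...) */
}

static void flush_zapped(void)
{
	bool need_callback;

	pthread_mutex_lock(&graph_lock);
	/* Decide, but do not act, while the lock is held. */
	need_callback = zapped_pending && !callback_scheduled;
	if (need_callback)
		callback_scheduled = true;
	pthread_mutex_unlock(&graph_lock);

	/* Firing here can no longer recurse into graph_lock. */
	if (need_callback)
		fire_callback();
}
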
++ */ ++ if (need_callback) ++ call_rcu(&delayed_free.rcu_head, free_zapped_rcu); + +- lockdep_unlock(); +- raw_local_irq_restore(flags); + } + + /* +@@ -6286,6 +6293,7 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size) + { + struct pending_free *pf; + unsigned long flags; ++ bool need_callback; + + init_data_structures_once(); + +@@ -6293,10 +6301,11 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size) + lockdep_lock(); + pf = get_pending_free(); + __lockdep_free_key_range(pf, start, size); +- call_rcu_zapped(pf); ++ need_callback = prepare_call_rcu_zapped(pf); + lockdep_unlock(); + raw_local_irq_restore(flags); +- ++ if (need_callback) ++ call_rcu(&delayed_free.rcu_head, free_zapped_rcu); + /* + * Wait for any possible iterators from look_up_lock_class() to pass + * before continuing to free the memory they refer to. +@@ -6390,6 +6399,7 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock) + struct pending_free *pf; + unsigned long flags; + int locked; ++ bool need_callback = false; + + raw_local_irq_save(flags); + locked = graph_lock(); +@@ -6398,11 +6408,13 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock) + + pf = get_pending_free(); + __lockdep_reset_lock(pf, lock); +- call_rcu_zapped(pf); ++ need_callback = prepare_call_rcu_zapped(pf); + + graph_unlock(); + out_irq: + raw_local_irq_restore(flags); ++ if (need_callback) ++ call_rcu(&delayed_free.rcu_head, free_zapped_rcu); + } + + /* +@@ -6446,6 +6458,7 @@ void lockdep_unregister_key(struct lock_class_key *key) + struct pending_free *pf; + unsigned long flags; + bool found = false; ++ bool need_callback = false; + + might_sleep(); + +@@ -6466,11 +6479,14 @@ void lockdep_unregister_key(struct lock_class_key *key) + if (found) { + pf = get_pending_free(); + __lockdep_free_key_range(pf, key, 1); +- call_rcu_zapped(pf); ++ need_callback = prepare_call_rcu_zapped(pf); + } + lockdep_unlock(); + raw_local_irq_restore(flags); + ++ if (need_callback) ++ call_rcu(&delayed_free.rcu_head, free_zapped_rcu); ++ + /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */ + synchronize_rcu(); + } +diff --git a/kernel/module/Makefile b/kernel/module/Makefile +index a10b2b9a6fdfc6..50ffcc413b5450 100644 +--- a/kernel/module/Makefile ++++ b/kernel/module/Makefile +@@ -5,7 +5,7 @@ + + # These are called from save_stack_trace() on slub debug path, + # and produce insane amounts of uninteresting coverage. +-KCOV_INSTRUMENT_module.o := n ++KCOV_INSTRUMENT_main.o := n + + obj-y += main.o + obj-y += strict_rwx.o +diff --git a/kernel/padata.c b/kernel/padata.c +index 29545dd6dd53d5..9bf77b58ee08d4 100644 +--- a/kernel/padata.c ++++ b/kernel/padata.c +@@ -404,7 +404,8 @@ void padata_do_serial(struct padata_priv *padata) + /* Sort in ascending order of sequence number. */ + list_for_each_prev(pos, &reorder->list) { + cur = list_entry(pos, struct padata_priv, list); +- if (cur->seq_nr < padata->seq_nr) ++ /* Compare by difference to consider integer wrap around */ ++ if ((signed int)(cur->seq_nr - padata->seq_nr) < 0) + break; + } + list_add(&padata->list, pos); +@@ -511,9 +512,12 @@ void __init padata_do_multithreaded(struct padata_mt_job *job) + * thread function. Load balance large jobs between threads by + * increasing the number of chunks, guarantee at least the minimum + * chunk size from the caller, and honor the caller's alignment. ++ * Ensure chunk_size is at least 1 to prevent divide-by-0 ++ * panic in padata_mt_helper(). 
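
Aside: two independent fixes sit in the padata hunks here. The reorder-queue insertion above now compares sequence numbers by signed difference, which stays correct when the u32 counter wraps, and chunk_size is clamped to at least 1 (per the comment above) so the divide in padata_mt_helper() cannot hit zero. The comparison idiom on its own, valid as long as live sequence numbers differ by less than 2^31:

#include <assert.h>
#include <stdint.h>

/* True if @a precedes @b in sequence order, even across a u32 wrap. */
static int seq_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

int main(void)
{
	assert(seq_before(1, 2));
	assert(seq_before(UINT32_MAX, 0));   /* plain "a < b" gets this wrong */
	assert(!seq_before(0, UINT32_MAX));
	return 0;
}
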
+ */
+ ps.chunk_size = job->size / (ps.nworks * load_balance_factor);
+ ps.chunk_size = max(ps.chunk_size, job->min_chunk);
++ ps.chunk_size = max(ps.chunk_size, 1ul);
+ ps.chunk_size = roundup(ps.chunk_size, job->align);
+
+ /*
+diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
+index 30b34f215ca354..e019f166daa61b 100644
+--- a/kernel/rcu/tree_nocb.h
++++ b/kernel/rcu/tree_nocb.h
+@@ -220,7 +220,10 @@ static bool __wake_nocb_gp(struct rcu_data *rdp_gp,
+ raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
+ if (needwake) {
+ trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
+- wake_up_process(rdp_gp->nocb_gp_kthread);
++ if (cpu_is_offline(raw_smp_processor_id()))
++ swake_up_one_online(&rdp_gp->nocb_gp_wq);
++ else
++ wake_up_process(rdp_gp->nocb_gp_kthread);
+ }
+
+ return needwake;
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index b2e1009e5706ee..5eb4807bad209c 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -533,7 +533,7 @@ static int cfs_rq_is_idle(struct cfs_rq *cfs_rq)
+
+ static int se_is_idle(struct sched_entity *se)
+ {
+- return 0;
++ return task_has_idle_policy(task_of(se));
+ }
+
+ #endif /* CONFIG_FAIR_GROUP_SCHED */
+@@ -3188,7 +3188,7 @@ static void reset_ptenuma_scan(struct task_struct *p)
+ p->mm->numa_scan_offset = 0;
+ }
+
+-static bool vma_is_accessed(struct vm_area_struct *vma)
++static bool vma_is_accessed(struct mm_struct *mm, struct vm_area_struct *vma)
+ {
+ unsigned long pids;
+ /*
+@@ -3200,8 +3200,29 @@ static bool vma_is_accessed(struct vm_area_struct *vma)
+ if (READ_ONCE(current->mm->numa_scan_seq) < 2)
+ return true;
+
+- pids = vma->numab_state->access_pids[0] | vma->numab_state->access_pids[1];
+- return test_bit(hash_32(current->pid, ilog2(BITS_PER_LONG)), &pids);
++ pids = vma->numab_state->pids_active[0] | vma->numab_state->pids_active[1];
++ if (test_bit(hash_32(current->pid, ilog2(BITS_PER_LONG)), &pids))
++ return true;
++
++ /*
++ * Complete a scan that has already started regardless of PID access, or
++ * some VMAs may never be scanned in multi-threaded applications:
++ */
++ if (mm->numa_scan_offset > vma->vm_start) {
++ trace_sched_skip_vma_numa(mm, vma, NUMAB_SKIP_IGNORE_PID);
++ return true;
++ }
++
++ /*
++ * This vma has not been accessed for a while, and if the number
++ * of threads in the same process is low, which means no other
++ * threads can help scan this vma, force a vma scan.
++ */
++ if (READ_ONCE(mm->numa_scan_seq) >
++ (vma->numab_state->prev_scan_seq + get_nr_threads(current)))
++ return true;
++
++ return false;
+ }
+
+ #define VMA_PID_RESET_PERIOD (4 * sysctl_numa_balancing_scan_delay)
+@@ -3221,6 +3242,8 @@ static void task_numa_work(struct callback_head *work)
+ unsigned long nr_pte_updates = 0;
+ long pages, virtpages;
+ struct vma_iterator vmi;
++ bool vma_pids_skipped;
++ bool vma_pids_forced = false;
+
+ SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work));
+
+@@ -3263,7 +3286,6 @@ static void task_numa_work(struct callback_head *work)
+ */
+ p->node_stamp += 2 * TICK_NSEC;
+
+- start = mm->numa_scan_offset;
+ pages = sysctl_numa_balancing_scan_size;
+ pages <<= 20 - PAGE_SHIFT; /* MB in pages */
+ virtpages = pages * 8; /* Scan up to this much virtual space */
+@@ -3273,6 +3295,16 @@ static void task_numa_work(struct callback_head *work)
+
+ if (!mmap_read_trylock(mm))
+ return;
++
++ /*
++ * VMAs are skipped if the current PID has not trapped a fault within
++ * the VMA recently.
Allow scanning to be forced if there is no ++ * suitable VMA remaining. ++ */ ++ vma_pids_skipped = false; ++ ++retry_pids: ++ start = mm->numa_scan_offset; + vma_iter_init(&vmi, mm, start); + vma = vma_next(&vmi); + if (!vma) { +@@ -3285,6 +3317,7 @@ static void task_numa_work(struct callback_head *work) + do { + if (!vma_migratable(vma) || !vma_policy_mof(vma) || + is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) { ++ trace_sched_skip_vma_numa(mm, vma, NUMAB_SKIP_UNSUITABLE); + continue; + } + +@@ -3295,15 +3328,19 @@ static void task_numa_work(struct callback_head *work) + * as migrating the pages will be of marginal benefit. + */ + if (!vma->vm_mm || +- (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ))) ++ (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ))) { ++ trace_sched_skip_vma_numa(mm, vma, NUMAB_SKIP_SHARED_RO); + continue; ++ } + + /* + * Skip inaccessible VMAs to avoid any confusion between + * PROT_NONE and NUMA hinting ptes + */ +- if (!vma_is_accessible(vma)) ++ if (!vma_is_accessible(vma)) { ++ trace_sched_skip_vma_numa(mm, vma, NUMAB_SKIP_INACCESSIBLE); + continue; ++ } + + /* Initialise new per-VMA NUMAB state. */ + if (!vma->numab_state) { +@@ -3316,8 +3353,15 @@ static void task_numa_work(struct callback_head *work) + msecs_to_jiffies(sysctl_numa_balancing_scan_delay); + + /* Reset happens after 4 times scan delay of scan start */ +- vma->numab_state->next_pid_reset = vma->numab_state->next_scan + ++ vma->numab_state->pids_active_reset = vma->numab_state->next_scan + + msecs_to_jiffies(VMA_PID_RESET_PERIOD); ++ ++ /* ++ * Ensure prev_scan_seq does not match numa_scan_seq, ++ * to prevent VMAs being skipped prematurely on the ++ * first scan: ++ */ ++ vma->numab_state->prev_scan_seq = mm->numa_scan_seq - 1; + } + + /* +@@ -3325,23 +3369,35 @@ static void task_numa_work(struct callback_head *work) + * delay the scan for new VMAs. + */ + if (mm->numa_scan_seq && time_before(jiffies, +- vma->numab_state->next_scan)) ++ vma->numab_state->next_scan)) { ++ trace_sched_skip_vma_numa(mm, vma, NUMAB_SKIP_SCAN_DELAY); + continue; ++ } + +- /* Do not scan the VMA if task has not accessed */ +- if (!vma_is_accessed(vma)) ++ /* RESET access PIDs regularly for old VMAs. */ ++ if (mm->numa_scan_seq && ++ time_after(jiffies, vma->numab_state->pids_active_reset)) { ++ vma->numab_state->pids_active_reset = vma->numab_state->pids_active_reset + ++ msecs_to_jiffies(VMA_PID_RESET_PERIOD); ++ vma->numab_state->pids_active[0] = READ_ONCE(vma->numab_state->pids_active[1]); ++ vma->numab_state->pids_active[1] = 0; ++ } ++ ++ /* Do not rescan VMAs twice within the same sequence. */ ++ if (vma->numab_state->prev_scan_seq == mm->numa_scan_seq) { ++ mm->numa_scan_offset = vma->vm_end; ++ trace_sched_skip_vma_numa(mm, vma, NUMAB_SKIP_SEQ_COMPLETED); + continue; ++ } + + /* +- * RESET access PIDs regularly for old VMAs. Resetting after checking +- * vma for recent access to avoid clearing PID info before access.. ++ * Do not scan the VMA if task has not accessed it, unless no other ++ * VMA candidate exists. 
+ */ +- if (mm->numa_scan_seq && +- time_after(jiffies, vma->numab_state->next_pid_reset)) { +- vma->numab_state->next_pid_reset = vma->numab_state->next_pid_reset + +- msecs_to_jiffies(VMA_PID_RESET_PERIOD); +- vma->numab_state->access_pids[0] = READ_ONCE(vma->numab_state->access_pids[1]); +- vma->numab_state->access_pids[1] = 0; ++ if (!vma_pids_forced && !vma_is_accessed(mm, vma)) { ++ vma_pids_skipped = true; ++ trace_sched_skip_vma_numa(mm, vma, NUMAB_SKIP_PID_INACTIVE); ++ continue; + } + + do { +@@ -3368,8 +3424,28 @@ static void task_numa_work(struct callback_head *work) + + cond_resched(); + } while (end != vma->vm_end); ++ ++ /* VMA scan is complete, do not scan until next sequence. */ ++ vma->numab_state->prev_scan_seq = mm->numa_scan_seq; ++ ++ /* ++ * Only force scan within one VMA at a time, to limit the ++ * cost of scanning a potentially uninteresting VMA. ++ */ ++ if (vma_pids_forced) ++ break; + } for_each_vma(vmi, vma); + ++ /* ++ * If no VMAs are remaining and VMAs were skipped due to the PID ++ * not accessing the VMA previously, then force a scan to ensure ++ * forward progress: ++ */ ++ if (!vma && !vma_pids_forced && vma_pids_skipped) { ++ vma_pids_forced = true; ++ goto retry_pids; ++ } ++ + out: + /* + * It is possible to reach the end of the VMA list but the last few +@@ -8209,16 +8285,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ + if (test_tsk_need_resched(curr)) + return; + +- /* Idle tasks are by definition preempted by non-idle tasks. */ +- if (unlikely(task_has_idle_policy(curr)) && +- likely(!task_has_idle_policy(p))) +- goto preempt; +- +- /* +- * Batch and idle tasks do not preempt non-idle tasks (their preemption +- * is driven by the tick): +- */ +- if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION)) ++ if (!sched_feat(WAKEUP_PREEMPTION)) + return; + + find_matching_se(&se, &pse); +@@ -8228,7 +8295,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ + pse_is_idle = se_is_idle(pse); + + /* +- * Preempt an idle group in favor of a non-idle group (and don't preempt ++ * Preempt an idle entity in favor of a non-idle entity (and don't preempt + * in the inverse case). + */ + if (cse_is_idle && !pse_is_idle) +@@ -8236,9 +8303,14 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ + if (cse_is_idle != pse_is_idle) + return; + ++ /* ++ * BATCH and IDLE tasks do not preempt others. ++ */ ++ if (unlikely(p->policy != SCHED_NORMAL)) ++ return; ++ + cfs_rq = cfs_rq_of(se); + update_curr(cfs_rq); +- + /* + * XXX pick_eevdf(cfs_rq) != se ? 
+ */ +diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c +index cc29bf49f71597..eca858bde80470 100644 +--- a/kernel/trace/bpf_trace.c ++++ b/kernel/trace/bpf_trace.c +@@ -1220,7 +1220,8 @@ static const struct bpf_func_proto bpf_get_func_arg_proto = { + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, +- .arg3_type = ARG_PTR_TO_LONG, ++ .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED, ++ .arg3_size = sizeof(u64), + }; + + BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value) +@@ -1236,7 +1237,8 @@ static const struct bpf_func_proto bpf_get_func_ret_proto = { + .func = get_func_ret, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +- .arg2_type = ARG_PTR_TO_LONG, ++ .arg2_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED, ++ .arg2_size = sizeof(u64), + }; + + BPF_CALL_1(get_func_arg_cnt, void *, ctx) +@@ -3283,17 +3285,20 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr + uprobes[i].ref_ctr_offset, + &uprobes[i].consumer); + if (err) { +- bpf_uprobe_unregister(&path, uprobes, i); +- goto error_free; ++ link->cnt = i; ++ goto error_unregister; + } + } + + err = bpf_link_prime(&link->link, &link_primer); + if (err) +- goto error_free; ++ goto error_unregister; + + return bpf_link_settle(&link_primer); + ++error_unregister: ++ bpf_uprobe_unregister(&path, uprobes, link->cnt); ++ + error_free: + kvfree(uprobes); + kfree(link); +diff --git a/lib/debugobjects.c b/lib/debugobjects.c +index c90834a209b585..9d401355d560d2 100644 +--- a/lib/debugobjects.c ++++ b/lib/debugobjects.c +@@ -141,13 +141,14 @@ static void fill_pool(void) + * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical + * sections. + */ +- while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) { ++ while (READ_ONCE(obj_nr_tofree) && ++ READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) { + raw_spin_lock_irqsave(&pool_lock, flags); + /* + * Recheck with the lock held as the worker thread might have + * won the race and freed the global free list already. + */ +- while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) { ++ while (obj_nr_tofree && (obj_pool_free < debug_objects_pool_min_level)) { + obj = hlist_entry(obj_to_free.first, typeof(*obj), node); + hlist_del(&obj->node); + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1); +diff --git a/lib/sbitmap.c b/lib/sbitmap.c +index 9307bf17a8175f..1d5e1574869225 100644 +--- a/lib/sbitmap.c ++++ b/lib/sbitmap.c +@@ -65,7 +65,7 @@ static inline bool sbitmap_deferred_clear(struct sbitmap_word *map, + { + unsigned long mask, word_mask; + +- guard(spinlock_irqsave)(&map->swap_lock); ++ guard(raw_spinlock_irqsave)(&map->swap_lock); + + if (!map->cleared) { + if (depth == 0) +@@ -136,7 +136,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, + } + + for (i = 0; i < sb->map_nr; i++) +- spin_lock_init(&sb->map[i].swap_lock); ++ raw_spin_lock_init(&sb->map[i].swap_lock); + + return 0; + } +diff --git a/lib/test_xarray.c b/lib/test_xarray.c +index e77d4856442c3f..542926da61a3ed 100644 +--- a/lib/test_xarray.c ++++ b/lib/test_xarray.c +@@ -1756,6 +1756,97 @@ static noinline void check_get_order(struct xarray *xa) + } + } + ++static noinline void check_xas_get_order(struct xarray *xa) ++{ ++ XA_STATE(xas, xa, 0); ++ ++ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 
20 : 1; ++ unsigned int order; ++ unsigned long i, j; ++ ++ for (order = 0; order < max_order; order++) { ++ for (i = 0; i < 10; i++) { ++ xas_set_order(&xas, i << order, order); ++ do { ++ xas_lock(&xas); ++ xas_store(&xas, xa_mk_value(i)); ++ xas_unlock(&xas); ++ } while (xas_nomem(&xas, GFP_KERNEL)); ++ ++ for (j = i << order; j < (i + 1) << order; j++) { ++ xas_set_order(&xas, j, 0); ++ rcu_read_lock(); ++ xas_load(&xas); ++ XA_BUG_ON(xa, xas_get_order(&xas) != order); ++ rcu_read_unlock(); ++ } ++ ++ xas_lock(&xas); ++ xas_set_order(&xas, i << order, order); ++ xas_store(&xas, NULL); ++ xas_unlock(&xas); ++ } ++ } ++} ++ ++static noinline void check_xas_conflict_get_order(struct xarray *xa) ++{ ++ XA_STATE(xas, xa, 0); ++ ++ void *entry; ++ int only_once; ++ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1; ++ unsigned int order; ++ unsigned long i, j, k; ++ ++ for (order = 0; order < max_order; order++) { ++ for (i = 0; i < 10; i++) { ++ xas_set_order(&xas, i << order, order); ++ do { ++ xas_lock(&xas); ++ xas_store(&xas, xa_mk_value(i)); ++ xas_unlock(&xas); ++ } while (xas_nomem(&xas, GFP_KERNEL)); ++ ++ /* ++ * Ensure xas_get_order works with xas_for_each_conflict. ++ */ ++ j = i << order; ++ for (k = 0; k < order; k++) { ++ only_once = 0; ++ xas_set_order(&xas, j + (1 << k), k); ++ xas_lock(&xas); ++ xas_for_each_conflict(&xas, entry) { ++ XA_BUG_ON(xa, entry != xa_mk_value(i)); ++ XA_BUG_ON(xa, xas_get_order(&xas) != order); ++ only_once++; ++ } ++ XA_BUG_ON(xa, only_once != 1); ++ xas_unlock(&xas); ++ } ++ ++ if (order < max_order - 1) { ++ only_once = 0; ++ xas_set_order(&xas, (i & ~1UL) << order, order + 1); ++ xas_lock(&xas); ++ xas_for_each_conflict(&xas, entry) { ++ XA_BUG_ON(xa, entry != xa_mk_value(i)); ++ XA_BUG_ON(xa, xas_get_order(&xas) != order); ++ only_once++; ++ } ++ XA_BUG_ON(xa, only_once != 1); ++ xas_unlock(&xas); ++ } ++ ++ xas_set_order(&xas, i << order, order); ++ xas_lock(&xas); ++ xas_store(&xas, NULL); ++ xas_unlock(&xas); ++ } ++ } ++} ++ ++ + static noinline void check_destroy(struct xarray *xa) + { + unsigned long index; +@@ -1805,6 +1896,8 @@ static int xarray_checks(void) + check_reserve(&xa0); + check_multi_store(&array); + check_get_order(&array); ++ check_xas_get_order(&array); ++ check_xas_conflict_get_order(&array); + check_xa_alloc(); + check_find(&array); + check_find_entry(&array); +diff --git a/lib/xarray.c b/lib/xarray.c +index 39f07bfc4dccac..da79128ad754fc 100644 +--- a/lib/xarray.c ++++ b/lib/xarray.c +@@ -1750,39 +1750,52 @@ void *xa_store_range(struct xarray *xa, unsigned long first, + EXPORT_SYMBOL(xa_store_range); + + /** +- * xa_get_order() - Get the order of an entry. +- * @xa: XArray. +- * @index: Index of the entry. ++ * xas_get_order() - Get the order of an entry. ++ * @xas: XArray operation state. ++ * ++ * Called after xas_load, the xas should not be in an error state. + * + * Return: A number between 0 and 63 indicating the order of the entry. 
+ */ +-int xa_get_order(struct xarray *xa, unsigned long index) ++int xas_get_order(struct xa_state *xas) + { +- XA_STATE(xas, xa, index); +- void *entry; + int order = 0; + +- rcu_read_lock(); +- entry = xas_load(&xas); +- +- if (!entry) +- goto unlock; +- +- if (!xas.xa_node) +- goto unlock; ++ if (!xas->xa_node) ++ return 0; + + for (;;) { +- unsigned int slot = xas.xa_offset + (1 << order); ++ unsigned int slot = xas->xa_offset + (1 << order); + + if (slot >= XA_CHUNK_SIZE) + break; +- if (!xa_is_sibling(xas.xa_node->slots[slot])) ++ if (!xa_is_sibling(xa_entry(xas->xa, xas->xa_node, slot))) + break; + order++; + } + +- order += xas.xa_node->shift; +-unlock: ++ order += xas->xa_node->shift; ++ return order; ++} ++EXPORT_SYMBOL_GPL(xas_get_order); ++ ++/** ++ * xa_get_order() - Get the order of an entry. ++ * @xa: XArray. ++ * @index: Index of the entry. ++ * ++ * Return: A number between 0 and 63 indicating the order of the entry. ++ */ ++int xa_get_order(struct xarray *xa, unsigned long index) ++{ ++ XA_STATE(xas, xa, index); ++ int order = 0; ++ void *entry; ++ ++ rcu_read_lock(); ++ entry = xas_load(&xas); ++ if (entry) ++ order = xas_get_order(&xas); + rcu_read_unlock(); + + return order; +diff --git a/lib/xz/xz_crc32.c b/lib/xz/xz_crc32.c +index 88a2c35e1b5971..5627b00fca296e 100644 +--- a/lib/xz/xz_crc32.c ++++ b/lib/xz/xz_crc32.c +@@ -29,7 +29,7 @@ STATIC_RW_DATA uint32_t xz_crc32_table[256]; + + XZ_EXTERN void xz_crc32_init(void) + { +- const uint32_t poly = CRC32_POLY_LE; ++ const uint32_t poly = 0xEDB88320; + + uint32_t i; + uint32_t j; +diff --git a/lib/xz/xz_private.h b/lib/xz/xz_private.h +index bf1e94ec7873cf..d9fd49b45fd758 100644 +--- a/lib/xz/xz_private.h ++++ b/lib/xz/xz_private.h +@@ -105,10 +105,6 @@ + # endif + #endif + +-#ifndef CRC32_POLY_LE +-#define CRC32_POLY_LE 0xedb88320 +-#endif +- + /* + * Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used + * before calling xz_dec_lzma2_run(). +diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c +index cf8a9fc5c9d1a6..530f01fedd3554 100644 +--- a/mm/damon/vaddr.c ++++ b/mm/damon/vaddr.c +@@ -126,6 +126,7 @@ static int __damon_va_three_regions(struct mm_struct *mm, + * If this is too slow, it can be optimised to examine the maple + * tree gaps. 
+ */ ++ rcu_read_lock(); + for_each_vma(vmi, vma) { + unsigned long gap; + +@@ -146,6 +147,7 @@ static int __damon_va_three_regions(struct mm_struct *mm, + next: + prev = vma; + } ++ rcu_read_unlock(); + + if (!sz_range(&second_gap) || !sz_range(&first_gap)) + return -EINVAL; +diff --git a/mm/filemap.c b/mm/filemap.c +index 2662c416e7fa82..e6c112f3a211fe 100644 +--- a/mm/filemap.c ++++ b/mm/filemap.c +@@ -846,6 +846,8 @@ noinline int __filemap_add_folio(struct address_space *mapping, + { + XA_STATE(xas, &mapping->i_pages, index); + int huge = folio_test_hugetlb(folio); ++ void *alloced_shadow = NULL; ++ int alloced_order = 0; + bool charged = false; + long nr = 1; + +@@ -868,13 +870,10 @@ noinline int __filemap_add_folio(struct address_space *mapping, + folio->mapping = mapping; + folio->index = xas.xa_index; + +- do { +- unsigned int order = xa_get_order(xas.xa, xas.xa_index); ++ for (;;) { ++ int order = -1, split_order = 0; + void *entry, *old = NULL; + +- if (order > folio_order(folio)) +- xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index), +- order, gfp); + xas_lock_irq(&xas); + xas_for_each_conflict(&xas, entry) { + old = entry; +@@ -882,19 +881,33 @@ noinline int __filemap_add_folio(struct address_space *mapping, + xas_set_err(&xas, -EEXIST); + goto unlock; + } ++ /* ++ * If a larger entry exists, ++ * it will be the first and only entry iterated. ++ */ ++ if (order == -1) ++ order = xas_get_order(&xas); ++ } ++ ++ /* entry may have changed before we re-acquire the lock */ ++ if (alloced_order && (old != alloced_shadow || order != alloced_order)) { ++ xas_destroy(&xas); ++ alloced_order = 0; + } + + if (old) { +- if (shadowp) +- *shadowp = old; +- /* entry may have been split before we acquired lock */ +- order = xa_get_order(xas.xa, xas.xa_index); +- if (order > folio_order(folio)) { ++ if (order > 0 && order > folio_order(folio)) { + /* How to handle large swap entries? */ + BUG_ON(shmem_mapping(mapping)); ++ if (!alloced_order) { ++ split_order = order; ++ goto unlock; ++ } + xas_split(&xas, old, order); + xas_reset(&xas); + } ++ if (shadowp) ++ *shadowp = old; + } + + xas_store(&xas, folio); +@@ -910,9 +923,24 @@ noinline int __filemap_add_folio(struct address_space *mapping, + __lruvec_stat_mod_folio(folio, + NR_FILE_THPS, nr); + } ++ + unlock: + xas_unlock_irq(&xas); +- } while (xas_nomem(&xas, gfp)); ++ ++ /* split needed, alloc here and retry. 
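
Aside: the __filemap_add_folio() rework around this comment no longer pre-allocates split nodes speculatively. It detects the required order under the xa_lock, backs out, allocates, then loops and re-checks that the shadow entry did not change in the meantime. The same drop-the-lock-to-allocate-and-retry shape, reduced to a runnable sketch where the helpers are hypothetical stand-ins:

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t xa_lock = PTHREAD_MUTEX_INITIALIZER;
static int reserved, slot;

static int nodes_needed(void)  { return 4; }	/* pretend a split needs 4 */
static int alloc_nodes(int n)  { return n; }	/* pretend allocation works */

static int store_with_retry(int value)
{
	for (;;) {
		pthread_mutex_lock(&xa_lock);
		int need = nodes_needed();
		if (need <= reserved) {		/* enough reserved: commit */
			slot = value;
			pthread_mutex_unlock(&xa_lock);
			return 0;
		}
		pthread_mutex_unlock(&xa_lock);	/* never allocate under it */

		reserved = alloc_nodes(need);	/* may sleep, hence unlocked */
		if (!reserved)
			return -ENOMEM;
		/* Loop: state may have changed while unlocked, re-check. */
	}
}
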
*/
++ if (split_order) {
++ xas_split_alloc(&xas, old, split_order, gfp);
++ if (xas_error(&xas))
++ goto error;
++ alloced_shadow = old;
++ alloced_order = split_order;
++ xas_reset(&xas);
++ continue;
++ }
++
++ if (!xas_nomem(&xas, gfp))
++ break;
++ }
+
+ if (xas_error(&xas))
+ goto error;
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 84e49e8bf0100e..6530e9cac45875 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -3025,8 +3025,12 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
+ flags |= MAP_LOCKED;
+
+ file = get_file(vma->vm_file);
++ ret = security_mmap_file(vma->vm_file, prot, flags);
++ if (ret)
++ goto out_fput;
+ ret = do_mmap(vma->vm_file, start, size,
+ prot, flags, 0, pgoff, &populate, NULL);
++out_fput:
+ fput(file);
+ out:
+ mmap_write_unlock(mm);
+diff --git a/mm/util.c b/mm/util.c
+index 2d5a309c4e548b..08d49489655221 100644
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -434,7 +434,7 @@ static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
+ if (gap + pad > gap)
+ gap += pad;
+
+- if (gap < MIN_GAP)
++ if (gap < MIN_GAP && MIN_GAP < MAX_GAP)
+ gap = MIN_GAP;
+ else if (gap > MAX_GAP)
+ gap = MAX_GAP;
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index d8a01eb016ad08..ed95a87ef9ab6d 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -107,8 +107,7 @@ void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
+ * where a timeout + cancel does indicate an actual failure.
+ */
+ if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
+- mgmt_connect_failed(hdev, &conn->dst, conn->type,
+- conn->dst_type, status);
++ mgmt_connect_failed(hdev, conn, status);
+
+ /* The connection attempt was doing scan for new RPA, and is
+ * in scan phase. If params are not associated with any other
+@@ -1233,8 +1232,7 @@ void hci_conn_failed(struct hci_conn *conn, u8 status)
+ hci_le_conn_failed(conn, status);
+ break;
+ case ACL_LINK:
+- mgmt_connect_failed(hdev, &conn->dst, conn->type,
+- conn->dst_type, status);
++ mgmt_connect_failed(hdev, conn, status);
+ break;
+ }
+
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index af7817a7c585bb..75515a1d2923aa 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -5391,7 +5391,10 @@ int hci_stop_discovery_sync(struct hci_dev *hdev)
+ if (!e)
+ return 0;
+
+- return hci_remote_name_cancel_sync(hdev, &e->data.bdaddr);
++ /* Ignore cancel errors since they shouldn't interfere with stopping
++ * of the discovery.
++ */ ++ hci_remote_name_cancel_sync(hdev, &e->data.bdaddr); + } + + return 0; +diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c +index 4ae9029b5785f4..149aff29e56469 100644 +--- a/net/bluetooth/mgmt.c ++++ b/net/bluetooth/mgmt.c +@@ -9724,13 +9724,18 @@ void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, + mgmt_pending_remove(cmd); + } + +-void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, +- u8 addr_type, u8 status) ++void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status) + { + struct mgmt_ev_connect_failed ev; + +- bacpy(&ev.addr.bdaddr, bdaddr); +- ev.addr.type = link_to_bdaddr(link_type, addr_type); ++ if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { ++ mgmt_device_disconnected(hdev, &conn->dst, conn->type, ++ conn->dst_type, status, true); ++ return; ++ } ++ ++ bacpy(&ev.addr.bdaddr, &conn->dst); ++ ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type); + ev.status = mgmt_status(status); + + mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL); +diff --git a/net/can/bcm.c b/net/can/bcm.c +index 00208ee13e578b..a1f5db0fd5d4fd 100644 +--- a/net/can/bcm.c ++++ b/net/can/bcm.c +@@ -1429,8 +1429,10 @@ static void bcm_notify(struct bcm_sock *bo, unsigned long msg, + /* remove device reference, if this is our bound device */ + if (bo->bound && bo->ifindex == dev->ifindex) { + #if IS_ENABLED(CONFIG_PROC_FS) +- if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read) ++ if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read) { + remove_proc_entry(bo->procname, sock_net(sk)->can.bcmproc_dir); ++ bo->bcm_proc_read = NULL; ++ } + #endif + bo->bound = 0; + bo->ifindex = 0; +diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c +index 4be73de5033cb7..319f47df33300c 100644 +--- a/net/can/j1939/transport.c ++++ b/net/can/j1939/transport.c +@@ -1179,10 +1179,10 @@ static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer) + break; + case -ENETDOWN: + /* In this case we should get a netdev_event(), all active +- * sessions will be cleared by +- * j1939_cancel_all_active_sessions(). So handle this as an +- * error, but let j1939_cancel_all_active_sessions() do the +- * cleanup including propagation of the error to user space. ++ * sessions will be cleared by j1939_cancel_active_session(). ++ * So handle this as an error, but let ++ * j1939_cancel_active_session() do the cleanup including ++ * propagation of the error to user space. 
+ */ + break; + case -EOVERFLOW: +diff --git a/net/core/filter.c b/net/core/filter.c +index be313928d272c6..8bfd46a070c167 100644 +--- a/net/core/filter.c ++++ b/net/core/filter.c +@@ -6222,20 +6222,25 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb, + int ret = BPF_MTU_CHK_RET_FRAG_NEEDED; + struct net_device *dev = skb->dev; + int skb_len, dev_len; +- int mtu; ++ int mtu = 0; + +- if (unlikely(flags & ~(BPF_MTU_CHK_SEGS))) +- return -EINVAL; ++ if (unlikely(flags & ~(BPF_MTU_CHK_SEGS))) { ++ ret = -EINVAL; ++ goto out; ++ } + +- if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len))) +- return -EINVAL; ++ if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len))) { ++ ret = -EINVAL; ++ goto out; ++ } + + dev = __dev_via_ifindex(dev, ifindex); +- if (unlikely(!dev)) +- return -ENODEV; ++ if (unlikely(!dev)) { ++ ret = -ENODEV; ++ goto out; ++ } + + mtu = READ_ONCE(dev->mtu); +- + dev_len = mtu + dev->hard_header_len; + + /* If set use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */ +@@ -6253,15 +6258,12 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb, + */ + if (skb_is_gso(skb)) { + ret = BPF_MTU_CHK_RET_SUCCESS; +- + if (flags & BPF_MTU_CHK_SEGS && + !skb_gso_validate_network_len(skb, mtu)) + ret = BPF_MTU_CHK_RET_SEGS_TOOBIG; + } + out: +- /* BPF verifier guarantees valid pointer */ + *mtu_len = mtu; +- + return ret; + } + +@@ -6271,19 +6273,21 @@ BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp, + struct net_device *dev = xdp->rxq->dev; + int xdp_len = xdp->data_end - xdp->data; + int ret = BPF_MTU_CHK_RET_SUCCESS; +- int mtu, dev_len; ++ int mtu = 0, dev_len; + + /* XDP variant doesn't support multi-buffer segment check (yet) */ +- if (unlikely(flags)) +- return -EINVAL; ++ if (unlikely(flags)) { ++ ret = -EINVAL; ++ goto out; ++ } + + dev = __dev_via_ifindex(dev, ifindex); +- if (unlikely(!dev)) +- return -ENODEV; ++ if (unlikely(!dev)) { ++ ret = -ENODEV; ++ goto out; ++ } + + mtu = READ_ONCE(dev->mtu); +- +- /* Add L2-header as dev MTU is L3 size */ + dev_len = mtu + dev->hard_header_len; + + /* Use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */ +@@ -6293,10 +6297,8 @@ BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp, + xdp_len += len_diff; /* minus result pass check */ + if (xdp_len > dev_len) + ret = BPF_MTU_CHK_RET_FRAG_NEEDED; +- +- /* BPF verifier guarantees valid pointer */ ++out: + *mtu_len = mtu; +- + return ret; + } + +@@ -6306,7 +6308,8 @@ static const struct bpf_func_proto bpf_skb_check_mtu_proto = { + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, +- .arg3_type = ARG_PTR_TO_INT, ++ .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED, ++ .arg3_size = sizeof(u32), + .arg4_type = ARG_ANYTHING, + .arg5_type = ARG_ANYTHING, + }; +@@ -6317,7 +6320,8 @@ static const struct bpf_func_proto bpf_xdp_check_mtu_proto = { + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, +- .arg3_type = ARG_PTR_TO_INT, ++ .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED, ++ .arg3_size = sizeof(u32), + .arg4_type = ARG_ANYTHING, + .arg5_type = ARG_ANYTHING, + }; +diff --git a/net/core/sock_map.c b/net/core/sock_map.c +index a37143d181f95f..2afac40bb83ca1 100644 +--- a/net/core/sock_map.c ++++ b/net/core/sock_map.c +@@ -1171,6 +1171,7 @@ static void sock_hash_free(struct bpf_map *map) + sock_put(elem->sk); + sock_hash_free_elem(htab, elem); + } ++ cond_resched(); + } + + /* wait for psock readers accessing its map link */ +diff --git 
a/net/ipv4/icmp.c b/net/ipv4/icmp.c +index 3b221643206de4..9dffdd876fef50 100644 +--- a/net/ipv4/icmp.c ++++ b/net/ipv4/icmp.c +@@ -222,57 +222,59 @@ int sysctl_icmp_msgs_per_sec __read_mostly = 1000; + int sysctl_icmp_msgs_burst __read_mostly = 50; + + static struct { +- spinlock_t lock; +- u32 credit; ++ atomic_t credit; + u32 stamp; +-} icmp_global = { +- .lock = __SPIN_LOCK_UNLOCKED(icmp_global.lock), +-}; ++} icmp_global; + + /** + * icmp_global_allow - Are we allowed to send one more ICMP message ? + * + * Uses a token bucket to limit our ICMP messages to ~sysctl_icmp_msgs_per_sec. + * Returns false if we reached the limit and can not send another packet. +- * Note: called with BH disabled ++ * Works in tandem with icmp_global_consume(). + */ + bool icmp_global_allow(void) + { +- u32 credit, delta, incr = 0, now = (u32)jiffies; +- bool rc = false; ++ u32 delta, now, oldstamp; ++ int incr, new, old; + +- /* Check if token bucket is empty and cannot be refilled +- * without taking the spinlock. The READ_ONCE() are paired +- * with the following WRITE_ONCE() in this same function. ++ /* Note: many cpus could find this condition true. ++ * Then later icmp_global_consume() could consume more credits, ++ * this is an acceptable race. + */ +- if (!READ_ONCE(icmp_global.credit)) { +- delta = min_t(u32, now - READ_ONCE(icmp_global.stamp), HZ); +- if (delta < HZ / 50) +- return false; +- } ++ if (atomic_read(&icmp_global.credit) > 0) ++ return true; + +- spin_lock(&icmp_global.lock); +- delta = min_t(u32, now - icmp_global.stamp, HZ); +- if (delta >= HZ / 50) { +- incr = READ_ONCE(sysctl_icmp_msgs_per_sec) * delta / HZ; +- if (incr) +- WRITE_ONCE(icmp_global.stamp, now); +- } +- credit = min_t(u32, icmp_global.credit + incr, +- READ_ONCE(sysctl_icmp_msgs_burst)); +- if (credit) { +- /* We want to use a credit of one in average, but need to randomize +- * it for security reasons. +- */ +- credit = max_t(int, credit - get_random_u32_below(3), 0); +- rc = true; ++ now = jiffies; ++ oldstamp = READ_ONCE(icmp_global.stamp); ++ delta = min_t(u32, now - oldstamp, HZ); ++ if (delta < HZ / 50) ++ return false; ++ ++ incr = READ_ONCE(sysctl_icmp_msgs_per_sec) * delta / HZ; ++ if (!incr) ++ return false; ++ ++ if (cmpxchg(&icmp_global.stamp, oldstamp, now) == oldstamp) { ++ old = atomic_read(&icmp_global.credit); ++ do { ++ new = min(old + incr, READ_ONCE(sysctl_icmp_msgs_burst)); ++ } while (!atomic_try_cmpxchg(&icmp_global.credit, &old, new)); + } +- WRITE_ONCE(icmp_global.credit, credit); +- spin_unlock(&icmp_global.lock); +- return rc; ++ return true; + } + EXPORT_SYMBOL(icmp_global_allow); + ++void icmp_global_consume(void) ++{ ++ int credits = get_random_u32_below(3); ++ ++ /* Note: this might make icmp_global.credit negative. 
*/ ++ if (credits) ++ atomic_sub(credits, &icmp_global.credit); ++} ++EXPORT_SYMBOL(icmp_global_consume); ++ + static bool icmpv4_mask_allow(struct net *net, int type, int code) + { + if (type > NR_ICMP_TYPES) +@@ -289,14 +291,16 @@ static bool icmpv4_mask_allow(struct net *net, int type, int code) + return false; + } + +-static bool icmpv4_global_allow(struct net *net, int type, int code) ++static bool icmpv4_global_allow(struct net *net, int type, int code, ++ bool *apply_ratelimit) + { + if (icmpv4_mask_allow(net, type, code)) + return true; + +- if (icmp_global_allow()) ++ if (icmp_global_allow()) { ++ *apply_ratelimit = true; + return true; +- ++ } + __ICMP_INC_STATS(net, ICMP_MIB_RATELIMITGLOBAL); + return false; + } +@@ -306,15 +310,16 @@ static bool icmpv4_global_allow(struct net *net, int type, int code) + */ + + static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt, +- struct flowi4 *fl4, int type, int code) ++ struct flowi4 *fl4, int type, int code, ++ bool apply_ratelimit) + { + struct dst_entry *dst = &rt->dst; + struct inet_peer *peer; + bool rc = true; + int vif; + +- if (icmpv4_mask_allow(net, type, code)) +- goto out; ++ if (!apply_ratelimit) ++ return true; + + /* No rate limit on loopback */ + if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) +@@ -329,6 +334,8 @@ static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt, + out: + if (!rc) + __ICMP_INC_STATS(net, ICMP_MIB_RATELIMITHOST); ++ else ++ icmp_global_consume(); + return rc; + } + +@@ -400,6 +407,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) + struct ipcm_cookie ipc; + struct rtable *rt = skb_rtable(skb); + struct net *net = dev_net(rt->dst.dev); ++ bool apply_ratelimit = false; + struct flowi4 fl4; + struct sock *sk; + struct inet_sock *inet; +@@ -411,11 +419,11 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) + if (ip_options_echo(net, &icmp_param->replyopts.opt.opt, skb)) + return; + +- /* Needed by both icmp_global_allow and icmp_xmit_lock */ ++ /* Needed by both icmpv4_global_allow and icmp_xmit_lock */ + local_bh_disable(); + +- /* global icmp_msgs_per_sec */ +- if (!icmpv4_global_allow(net, type, code)) ++ /* is global icmp_msgs_per_sec exhausted ? 
*/ ++ if (!icmpv4_global_allow(net, type, code, &apply_ratelimit)) + goto out_bh_enable; + + sk = icmp_xmit_lock(net); +@@ -448,7 +456,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) + rt = ip_route_output_key(net, &fl4); + if (IS_ERR(rt)) + goto out_unlock; +- if (icmpv4_xrlim_allow(net, rt, &fl4, type, code)) ++ if (icmpv4_xrlim_allow(net, rt, &fl4, type, code, apply_ratelimit)) + icmp_push_reply(sk, icmp_param, &fl4, &ipc, &rt); + ip_rt_put(rt); + out_unlock: +@@ -592,6 +600,7 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, + int room; + struct icmp_bxm icmp_param; + struct rtable *rt = skb_rtable(skb_in); ++ bool apply_ratelimit = false; + struct ipcm_cookie ipc; + struct flowi4 fl4; + __be32 saddr; +@@ -673,7 +682,7 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, + } + } + +- /* Needed by both icmp_global_allow and icmp_xmit_lock */ ++ /* Needed by both icmpv4_global_allow and icmp_xmit_lock */ + local_bh_disable(); + + /* Check global sysctl_icmp_msgs_per_sec ratelimit, unless +@@ -681,7 +690,7 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, + * loopback, then peer ratelimit still work (in icmpv4_xrlim_allow) + */ + if (!(skb_in->dev && (skb_in->dev->flags&IFF_LOOPBACK)) && +- !icmpv4_global_allow(net, type, code)) ++ !icmpv4_global_allow(net, type, code, &apply_ratelimit)) + goto out_bh_enable; + + sk = icmp_xmit_lock(net); +@@ -740,7 +749,7 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, + goto out_unlock; + + /* peer icmp_ratelimit */ +- if (!icmpv4_xrlim_allow(net, rt, &fl4, type, code)) ++ if (!icmpv4_xrlim_allow(net, rt, &fl4, type, code, apply_ratelimit)) + goto ende; + + /* RFC says return as much as we can without exceeding 576 bytes. */ +diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig +index 08d4b7132d4c45..1c9c686d9522f7 100644 +--- a/net/ipv6/Kconfig ++++ b/net/ipv6/Kconfig +@@ -323,6 +323,7 @@ config IPV6_RPL_LWTUNNEL + bool "IPv6: RPL Source Routing Header support" + depends on IPV6 + select LWTUNNEL ++ select DST_CACHE + help + Support for RFC6554 RPL Source Routing Header using the lightweight + tunnels mechanism. 
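
Aside: the IPv4 hunk above and the IPv6 hunk below share the reworked limiter. Any CPU may observe positive credit and proceed, exactly one CPU wins the compare-and-swap on the refill stamp and tops credit up to the burst cap, and credit is consumed only once a message has actually been sent. A self-contained C11 sketch of that bucket; the tick source and tunables are illustrative, and the kernel additionally randomizes the consumed amount:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define MSGS_PER_SEC	1000	/* sysctl_icmp_msgs_per_sec stand-in */
#define MSGS_BURST	50	/* sysctl_icmp_msgs_burst stand-in */

static atomic_int credit;
static _Atomic uint32_t stamp;

static uint32_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint32_t)(ts.tv_sec * 1000 + ts.tv_nsec / 1000000);
}

static bool global_allow(void)
{
	if (atomic_load(&credit) > 0)
		return true;

	uint32_t now = now_ms();
	uint32_t old = atomic_load(&stamp);
	uint32_t delta = now - old;

	if (delta < 20)		/* refill at most every HZ/50 equivalent */
		return false;
	if (delta > 1000)	/* cap the refill window at 1s, like the HZ cap */
		delta = 1000;

	int incr = (int)((uint64_t)MSGS_PER_SEC * delta / 1000);
	if (!incr)
		return false;

	/* Only the CPU that advances the stamp performs the refill. */
	if (atomic_compare_exchange_strong(&stamp, &old, now)) {
		int c = atomic_load(&credit), next;

		do {
			next = c + incr < MSGS_BURST ? c + incr : MSGS_BURST;
		} while (!atomic_compare_exchange_weak(&credit, &c, next));
	}
	return true;
}

/* Called only after a message really went out; may drive credit negative. */
static void global_consume(void)
{
	atomic_fetch_sub(&credit, 1);
}
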
+diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c +index 93a594a901d12b..a790294d310484 100644 +--- a/net/ipv6/icmp.c ++++ b/net/ipv6/icmp.c +@@ -175,14 +175,16 @@ static bool icmpv6_mask_allow(struct net *net, int type) + return false; + } + +-static bool icmpv6_global_allow(struct net *net, int type) ++static bool icmpv6_global_allow(struct net *net, int type, ++ bool *apply_ratelimit) + { + if (icmpv6_mask_allow(net, type)) + return true; + +- if (icmp_global_allow()) ++ if (icmp_global_allow()) { ++ *apply_ratelimit = true; + return true; +- ++ } + __ICMP_INC_STATS(net, ICMP_MIB_RATELIMITGLOBAL); + return false; + } +@@ -191,13 +193,13 @@ static bool icmpv6_global_allow(struct net *net, int type) + * Check the ICMP output rate limit + */ + static bool icmpv6_xrlim_allow(struct sock *sk, u8 type, +- struct flowi6 *fl6) ++ struct flowi6 *fl6, bool apply_ratelimit) + { + struct net *net = sock_net(sk); + struct dst_entry *dst; + bool res = false; + +- if (icmpv6_mask_allow(net, type)) ++ if (!apply_ratelimit) + return true; + + /* +@@ -228,6 +230,8 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type, + if (!res) + __ICMP6_INC_STATS(net, ip6_dst_idev(dst), + ICMP6_MIB_RATELIMITHOST); ++ else ++ icmp_global_consume(); + dst_release(dst); + return res; + } +@@ -452,6 +456,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + struct net *net; + struct ipv6_pinfo *np; + const struct in6_addr *saddr = NULL; ++ bool apply_ratelimit = false; + struct dst_entry *dst; + struct icmp6hdr tmp_hdr; + struct flowi6 fl6; +@@ -533,11 +538,12 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + return; + } + +- /* Needed by both icmp_global_allow and icmpv6_xmit_lock */ ++ /* Needed by both icmpv6_global_allow and icmpv6_xmit_lock */ + local_bh_disable(); + + /* Check global sysctl_icmp_msgs_per_sec ratelimit */ +- if (!(skb->dev->flags & IFF_LOOPBACK) && !icmpv6_global_allow(net, type)) ++ if (!(skb->dev->flags & IFF_LOOPBACK) && ++ !icmpv6_global_allow(net, type, &apply_ratelimit)) + goto out_bh_enable; + + mip6_addr_swap(skb, parm); +@@ -575,7 +581,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + + np = inet6_sk(sk); + +- if (!icmpv6_xrlim_allow(sk, type, &fl6)) ++ if (!icmpv6_xrlim_allow(sk, type, &fl6, apply_ratelimit)) + goto out; + + tmp_hdr.icmp6_type = type; +@@ -717,6 +723,7 @@ static enum skb_drop_reason icmpv6_echo_reply(struct sk_buff *skb) + struct ipv6_pinfo *np; + const struct in6_addr *saddr = NULL; + struct icmp6hdr *icmph = icmp6_hdr(skb); ++ bool apply_ratelimit = false; + struct icmp6hdr tmp_hdr; + struct flowi6 fl6; + struct icmpv6_msg msg; +@@ -781,8 +788,9 @@ static enum skb_drop_reason icmpv6_echo_reply(struct sk_buff *skb) + goto out; + + /* Check the ratelimit */ +- if ((!(skb->dev->flags & IFF_LOOPBACK) && !icmpv6_global_allow(net, ICMPV6_ECHO_REPLY)) || +- !icmpv6_xrlim_allow(sk, ICMPV6_ECHO_REPLY, &fl6)) ++ if ((!(skb->dev->flags & IFF_LOOPBACK) && ++ !icmpv6_global_allow(net, ICMPV6_ECHO_REPLY, &apply_ratelimit)) || ++ !icmpv6_xrlim_allow(sk, ICMPV6_ECHO_REPLY, &fl6, apply_ratelimit)) + goto out_dst_release; + + idev = __in6_dev_get(skb->dev); +diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c +index 71d692728230e0..690d1c04769133 100644 +--- a/net/ipv6/netfilter/nf_reject_ipv6.c ++++ b/net/ipv6/netfilter/nf_reject_ipv6.c +@@ -223,33 +223,23 @@ void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb, + const struct tcphdr *oth, unsigned int otcplen) + { + struct tcphdr *tcph; 
+- int needs_ack; + + skb_reset_transport_header(nskb); +- tcph = skb_put(nskb, sizeof(struct tcphdr)); ++ tcph = skb_put_zero(nskb, sizeof(struct tcphdr)); + /* Truncate to length (no data) */ + tcph->doff = sizeof(struct tcphdr)/4; + tcph->source = oth->dest; + tcph->dest = oth->source; + + if (oth->ack) { +- needs_ack = 0; + tcph->seq = oth->ack_seq; +- tcph->ack_seq = 0; + } else { +- needs_ack = 1; + tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin + + otcplen - (oth->doff<<2)); +- tcph->seq = 0; ++ tcph->ack = 1; + } + +- /* Reset flags */ +- ((u_int8_t *)tcph)[13] = 0; + tcph->rst = 1; +- tcph->ack = needs_ack; +- tcph->window = 0; +- tcph->urg_ptr = 0; +- tcph->check = 0; + + /* Adjust TCP checksum */ + tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr, +diff --git a/net/ipv6/route.c b/net/ipv6/route.c +index 0299886dbeb913..a9104c4c1c02d9 100644 +--- a/net/ipv6/route.c ++++ b/net/ipv6/route.c +@@ -175,7 +175,7 @@ static void rt6_uncached_list_flush_dev(struct net_device *dev) + struct net_device *rt_dev = rt->dst.dev; + bool handled = false; + +- if (rt_idev->dev == dev) { ++ if (rt_idev && rt_idev->dev == dev) { + rt->rt6i_idev = in6_dev_get(blackhole_netdev); + in6_dev_put(rt_idev); + handled = true; +diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c +index 2c83b7586422dd..db3c19a42e1ca7 100644 +--- a/net/ipv6/rpl_iptunnel.c ++++ b/net/ipv6/rpl_iptunnel.c +@@ -263,10 +263,8 @@ static int rpl_input(struct sk_buff *skb) + rlwt = rpl_lwt_lwtunnel(orig_dst->lwtstate); + + err = rpl_do_srh(skb, rlwt); +- if (unlikely(err)) { +- kfree_skb(skb); +- return err; +- } ++ if (unlikely(err)) ++ goto drop; + + local_bh_disable(); + dst = dst_cache_get(&rlwt->cache); +@@ -286,9 +284,13 @@ static int rpl_input(struct sk_buff *skb) + + err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev)); + if (unlikely(err)) +- return err; ++ goto drop; + + return dst_input(skb); ++ ++drop: ++ kfree_skb(skb); ++ return err; + } + + static int nla_put_rpl_srh(struct sk_buff *skb, int attrtype, +diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c +index a7c39e895b1e53..fae701248f0580 100644 +--- a/net/mac80211/iface.c ++++ b/net/mac80211/iface.c +@@ -466,6 +466,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do + { + struct ieee80211_local *local = sdata->local; + unsigned long flags; ++ struct sk_buff_head freeq; + struct sk_buff *skb, *tmp; + u32 hw_reconf_flags = 0; + int i, flushed; +@@ -652,18 +653,32 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do + skb_queue_purge(&sdata->status_queue); + } + ++ /* ++ * Since ieee80211_free_txskb() may issue __dev_queue_xmit() ++ * which should be called with interrupts enabled, reclamation ++ * is done in two phases: ++ */ ++ __skb_queue_head_init(&freeq); ++ ++ /* unlink from local queues... */ + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + for (i = 0; i < IEEE80211_MAX_QUEUES; i++) { + skb_queue_walk_safe(&local->pending[i], skb, tmp) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + if (info->control.vif == &sdata->vif) { + __skb_unlink(skb, &local->pending[i]); +- ieee80211_free_txskb(&local->hw, skb); ++ __skb_queue_tail(&freeq, skb); + } + } + } + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); + ++ /* ... and perform actual reclamation with interrupts enabled. 
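
Aside: the mac80211 iface.c hunk above defers ieee80211_free_txskb(). Matching frames are only unlinked onto a private queue while the IRQ-disabled spinlock is held, and the actual freeing, which may call back into __dev_queue_xmit(), happens afterwards. A reduced sketch of the two-phase reclamation, with a mutex and a singly linked list standing in for the irqsave lock and skb queues:

#include <pthread.h>
#include <stdlib.h>

struct pkt { struct pkt *next; int vif; };

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pkt *pending;

static void purge_vif(int vif)
{
	struct pkt **pp = &pending, *p, *freeq = NULL;

	/* Phase 1: unlink matches while holding the "no interrupts" lock. */
	pthread_mutex_lock(&queue_lock);
	while ((p = *pp) != NULL) {
		if (p->vif == vif) {
			*pp = p->next;		/* unlink from shared queue */
			p->next = freeq;	/* park on private list */
			freeq = p;
		} else {
			pp = &p->next;
		}
	}
	pthread_mutex_unlock(&queue_lock);

	/* Phase 2: reclaim outside the lock, where re-entry is safe. */
	while ((p = freeq) != NULL) {
		freeq = p->next;
		free(p);
	}
}
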
*/ ++ skb_queue_walk_safe(&freeq, skb, tmp) { ++ __skb_unlink(skb, &freeq); ++ ieee80211_free_txskb(&local->hw, skb); ++ } ++ + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + ieee80211_txq_remove_vlan(local, sdata); + +diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c +index 5bedd9cef414d0..2517a5521a5780 100644 +--- a/net/mac80211/offchannel.c ++++ b/net/mac80211/offchannel.c +@@ -940,6 +940,7 @@ int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, + } + + IEEE80211_SKB_CB(skb)->flags = flags; ++ IEEE80211_SKB_CB(skb)->control.flags |= IEEE80211_TX_CTRL_DONT_USE_RATE_MASK; + + skb->dev = sdata->dev; + +diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c +index 3cf252418bd38c..78e7ac6c0af0b0 100644 +--- a/net/mac80211/rate.c ++++ b/net/mac80211/rate.c +@@ -890,7 +890,7 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif, + if (ieee80211_is_tx_data(skb)) + rate_control_apply_mask(sdata, sta, sband, dest, max_rates); + +- if (!(info->control.flags & IEEE80211_TX_CTRL_SCAN_TX)) ++ if (!(info->control.flags & IEEE80211_TX_CTRL_DONT_USE_RATE_MASK)) + mask = sdata->rc_rateidx_mask[info->band]; + + if (dest[0].idx < 0) +diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c +index 3d68db738cde45..b58d061333c523 100644 +--- a/net/mac80211/scan.c ++++ b/net/mac80211/scan.c +@@ -636,7 +636,7 @@ static void ieee80211_send_scan_probe_req(struct ieee80211_sub_if_data *sdata, + cpu_to_le16(IEEE80211_SN_TO_SEQ(sn)); + } + IEEE80211_SKB_CB(skb)->flags |= tx_flags; +- IEEE80211_SKB_CB(skb)->control.flags |= IEEE80211_TX_CTRL_SCAN_TX; ++ IEEE80211_SKB_CB(skb)->control.flags |= IEEE80211_TX_CTRL_DONT_USE_RATE_MASK; + ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band); + } + } +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c +index 415e951e4138a5..45a093d3f1fa7f 100644 +--- a/net/mac80211/tx.c ++++ b/net/mac80211/tx.c +@@ -706,7 +706,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) + txrc.skb = tx->skb; + txrc.reported_rate.idx = -1; + +- if (unlikely(info->control.flags & IEEE80211_TX_CTRL_SCAN_TX)) { ++ if (unlikely(info->control.flags & IEEE80211_TX_CTRL_DONT_USE_RATE_MASK)) { + txrc.rate_idx_mask = ~0; + } else { + txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band]; +diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c +index 4dab45039f3497..282e9644f6fdd6 100644 +--- a/net/netfilter/nf_conntrack_netlink.c ++++ b/net/netfilter/nf_conntrack_netlink.c +@@ -381,7 +381,7 @@ static int ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct) + #define ctnetlink_dump_secctx(a, b) (0) + #endif + +-#ifdef CONFIG_NF_CONNTRACK_LABELS ++#ifdef CONFIG_NF_CONNTRACK_EVENTS + static inline int ctnetlink_label_size(const struct nf_conn *ct) + { + struct nf_conn_labels *labels = nf_ct_labels_find(ct); +@@ -390,6 +390,7 @@ static inline int ctnetlink_label_size(const struct nf_conn *ct) + return 0; + return nla_total_size(sizeof(labels->bits)); + } ++#endif + + static int + ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct) +@@ -410,10 +411,6 @@ ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct) + + return 0; + } +-#else +-#define ctnetlink_dump_labels(a, b) (0) +-#define ctnetlink_label_size(a) (0) +-#endif + + #define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple) + +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c +index da5684e3fd08c8..da7fd3919ce483 100644 +--- a/net/netfilter/nf_tables_api.c ++++ 
b/net/netfilter/nf_tables_api.c +@@ -1778,7 +1778,7 @@ static int nft_dump_basechain_hook(struct sk_buff *skb, int family, + if (!hook_list) + hook_list = &basechain->hook_list; + +- list_for_each_entry(hook, hook_list, list) { ++ list_for_each_entry_rcu(hook, hook_list, list) { + if (!first) + first = hook; + +@@ -4462,7 +4462,7 @@ int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result) + return -ERANGE; + + ms *= NSEC_PER_MSEC; +- *result = nsecs_to_jiffies64(ms); ++ *result = nsecs_to_jiffies64(ms) ? : !!ms; + return 0; + } + +@@ -6691,17 +6691,23 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, + return err; + } else if (set->flags & NFT_SET_TIMEOUT && + !(flags & NFT_SET_ELEM_INTERVAL_END)) { +- timeout = READ_ONCE(set->timeout); ++ timeout = set->timeout; + } + + expiration = 0; + if (nla[NFTA_SET_ELEM_EXPIRATION] != NULL) { + if (!(set->flags & NFT_SET_TIMEOUT)) + return -EINVAL; ++ if (timeout == 0) ++ return -EOPNOTSUPP; ++ + err = nf_msecs_to_jiffies64(nla[NFTA_SET_ELEM_EXPIRATION], + &expiration); + if (err) + return err; ++ ++ if (expiration > timeout) ++ return -ERANGE; + } + + if (nla[NFTA_SET_ELEM_EXPR]) { +@@ -6792,7 +6798,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, + if (err < 0) + goto err_parse_key_end; + +- if (timeout != READ_ONCE(set->timeout)) { ++ if (timeout != set->timeout) { + err = nft_set_ext_add(&tmpl, NFT_SET_EXT_TIMEOUT); + if (err < 0) + goto err_parse_key_end; +@@ -8937,7 +8943,7 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable) + flowtable->data.type->setup(&flowtable->data, hook->ops.dev, + FLOW_BLOCK_UNBIND); + list_del_rcu(&hook->list); +- kfree(hook); ++ kfree_rcu(hook, rcu); + } + kfree(flowtable->name); + module_put(flowtable->data.type->owner); +diff --git a/net/qrtr/af_qrtr.c b/net/qrtr/af_qrtr.c +index 41ece61eb57ab7..00c51cf693f3d0 100644 +--- a/net/qrtr/af_qrtr.c ++++ b/net/qrtr/af_qrtr.c +@@ -884,7 +884,7 @@ static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb, + + mutex_lock(&qrtr_node_lock); + list_for_each_entry(node, &qrtr_all_nodes, item) { +- skbn = skb_clone(skb, GFP_KERNEL); ++ skbn = pskb_copy(skb, GFP_KERNEL); + if (!skbn) + break; + skb_set_owner_w(skbn, skb->sk); +diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c +index 593846d252143c..114fef65f92eab 100644 +--- a/net/tipc/bcast.c ++++ b/net/tipc/bcast.c +@@ -320,8 +320,8 @@ static int tipc_mcast_send_sync(struct net *net, struct sk_buff *skb, + { + struct tipc_msg *hdr, *_hdr; + struct sk_buff_head tmpq; ++ u16 cong_link_cnt = 0; + struct sk_buff *_skb; +- u16 cong_link_cnt; + int rc = 0; + + /* Is a cluster supporting with new capabilities ? 
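
Aside: in the nf_tables hunks above, nf_msecs_to_jiffies64() now yields at least one jiffy for any nonzero input (via the GNU C "x ? : y" shorthand), so a sub-jiffy timeout no longer degenerates into "no timeout"; element expirations are additionally required to be nonzero and no larger than the set timeout. The rounding guard in portable form, with an illustrative HZ value:

#include <stdint.h>

#define HZ 100	/* illustrative tick rate: one jiffy = 10 ms */

/* Never round a nonzero millisecond request down to zero ticks. */
static uint64_t ms_to_ticks(uint64_t ms)
{
	uint64_t t = ms * HZ / 1000;

	return t ? t : !!ms;	/* 0 stays 0; 1..9 ms becomes 1 tick */
}
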
*/ +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c +index 2b2dc46dc701f9..4ce23762b1c954 100644 +--- a/net/wireless/nl80211.c ++++ b/net/wireless/nl80211.c +@@ -9679,7 +9679,8 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev, + return ERR_PTR(-ENOMEM); + + if (n_ssids) +- request->ssids = (void *)&request->channels[n_channels]; ++ request->ssids = (void *)request + ++ struct_size(request, channels, n_channels); + request->n_ssids = n_ssids; + if (ie_len) { + if (n_ssids) +diff --git a/net/wireless/scan.c b/net/wireless/scan.c +index 4d88e797ae49ff..4fc6279750ea15 100644 +--- a/net/wireless/scan.c ++++ b/net/wireless/scan.c +@@ -3209,8 +3209,8 @@ int cfg80211_wext_siwscan(struct net_device *dev, + n_channels = ieee80211_get_num_supported_channels(wiphy); + } + +- creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) + +- n_channels * sizeof(void *), ++ creq = kzalloc(struct_size(creq, channels, n_channels) + ++ sizeof(struct cfg80211_ssid), + GFP_ATOMIC); + if (!creq) + return -ENOMEM; +@@ -3218,7 +3218,7 @@ int cfg80211_wext_siwscan(struct net_device *dev, + creq->wiphy = wiphy; + creq->wdev = dev->ieee80211_ptr; + /* SSIDs come after channels */ +- creq->ssids = (void *)&creq->channels[n_channels]; ++ creq->ssids = (void *)creq + struct_size(creq, channels, n_channels); + creq->n_channels = n_channels; + creq->n_ssids = 1; + creq->scan_start = jiffies; +diff --git a/net/wireless/sme.c b/net/wireless/sme.c +index 72d78dbc55ffdc..591cda99d72f57 100644 +--- a/net/wireless/sme.c ++++ b/net/wireless/sme.c +@@ -115,7 +115,8 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev) + n_channels = i; + } + request->n_channels = n_channels; +- request->ssids = (void *)&request->channels[n_channels]; ++ request->ssids = (void *)request + ++ struct_size(request, channels, n_channels); + request->n_ssids = 1; + + memcpy(request->ssids[0].ssid, wdev->conn->params.ssid, +diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile +index 4ccf4236031c1f..3fa16412db15ca 100644 +--- a/samples/bpf/Makefile ++++ b/samples/bpf/Makefile +@@ -166,6 +166,10 @@ BPF_EXTRA_CFLAGS += -I$(srctree)/arch/mips/include/asm/mach-generic + endif + endif + ++ifeq ($(ARCH), x86) ++BPF_EXTRA_CFLAGS += -fcf-protection ++endif ++ + TPROGS_CFLAGS += -Wall -O2 + TPROGS_CFLAGS += -Wmissing-prototypes + TPROGS_CFLAGS += -Wstrict-prototypes +@@ -394,7 +398,7 @@ $(obj)/%.o: $(src)/%.c + -Wno-gnu-variable-sized-type-not-at-end \ + -Wno-address-of-packed-member -Wno-tautological-compare \ + -Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \ +- -fno-asynchronous-unwind-tables -fcf-protection \ ++ -fno-asynchronous-unwind-tables \ + -I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \ + -O2 -emit-llvm -Xclang -disable-llvm-passes -c $< -o - | \ + $(OPT) -O2 -mtriple=bpf-pc-linux | $(LLVM_DIS) | \ +diff --git a/security/bpf/hooks.c b/security/bpf/hooks.c +index cfaf1d0e6a5f51..35933ae53b92cf 100644 +--- a/security/bpf/hooks.c ++++ b/security/bpf/hooks.c +@@ -24,7 +24,6 @@ static int __init bpf_lsm_init(void) + + struct lsm_blob_sizes bpf_lsm_blob_sizes __ro_after_init = { + .lbs_inode = sizeof(struct bpf_storage_blob), +- .lbs_task = sizeof(struct bpf_storage_blob), + }; + + DEFINE_LSM(bpf) = { +diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c +index e22aad7604e8ac..5dd1e164f9b13d 100644 +--- a/security/smack/smackfs.c ++++ b/security/smack/smackfs.c +@@ -932,7 +932,7 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf, + } + if (rc >= 0) { + old_cat = 
skp->smk_netlabel.attr.mls.cat; +- skp->smk_netlabel.attr.mls.cat = ncats.attr.mls.cat; ++ rcu_assign_pointer(skp->smk_netlabel.attr.mls.cat, ncats.attr.mls.cat); + skp->smk_netlabel.attr.mls.lvl = ncats.attr.mls.lvl; + synchronize_rcu(); + netlbl_catmap_free(old_cat); +diff --git a/sound/pci/hda/cs35l41_hda_spi.c b/sound/pci/hda/cs35l41_hda_spi.c +index eb287aa5f78250..d95954ce55d81b 100644 +--- a/sound/pci/hda/cs35l41_hda_spi.c ++++ b/sound/pci/hda/cs35l41_hda_spi.c +@@ -38,6 +38,7 @@ static const struct spi_device_id cs35l41_hda_spi_id[] = { + { "cs35l41-hda", 0 }, + {} + }; ++MODULE_DEVICE_TABLE(spi, cs35l41_hda_spi_id); + + static const struct acpi_device_id cs35l41_acpi_hda_match[] = { + { "CSC3551", 0 }, +diff --git a/sound/pci/hda/tas2781_hda_i2c.c b/sound/pci/hda/tas2781_hda_i2c.c +index e5bb1fed26a0c8..980e6104c2f3f2 100644 +--- a/sound/pci/hda/tas2781_hda_i2c.c ++++ b/sound/pci/hda/tas2781_hda_i2c.c +@@ -95,9 +95,7 @@ static int tas2781_get_i2c_res(struct acpi_resource *ares, void *data) + static int tas2781_read_acpi(struct tasdevice_priv *p, const char *hid) + { + struct acpi_device *adev; +- struct device *physdev; + LIST_HEAD(resources); +- const char *sub; + int ret; + + adev = acpi_dev_get_first_match_dev(hid, NULL, -1); +@@ -113,18 +111,8 @@ static int tas2781_read_acpi(struct tasdevice_priv *p, const char *hid) + + acpi_dev_free_resource_list(&resources); + strscpy(p->dev_name, hid, sizeof(p->dev_name)); +- physdev = get_device(acpi_get_first_physical_node(adev)); + acpi_dev_put(adev); + +- /* No side-effect to the playback even if subsystem_id is NULL*/ +- sub = acpi_get_subsystem_id(ACPI_HANDLE(physdev)); +- if (IS_ERR(sub)) +- sub = NULL; +- +- p->acpi_subsystem_id = sub; +- +- put_device(physdev); +- + return 0; + + err: +@@ -722,7 +710,7 @@ static int tas2781_hda_i2c_probe(struct i2c_client *clt) + } else + return -ENODEV; + +- tas_hda->priv->irq_info.irq = clt->irq; ++ tas_hda->priv->irq = clt->irq; + ret = tas2781_read_acpi(tas_hda->priv, device_name); + if (ret) + return dev_err_probe(tas_hda->dev, ret, +diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c +index e3aca9c785a079..aa163ec4086223 100644 +--- a/sound/soc/codecs/rt5682.c ++++ b/sound/soc/codecs/rt5682.c +@@ -2903,8 +2903,10 @@ int rt5682_register_dai_clks(struct rt5682_priv *rt5682) + } + + if (dev->of_node) { +- devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, ++ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, + dai_clk_hw); ++ if (ret) ++ return ret; + } else { + ret = devm_clk_hw_register_clkdev(dev, dai_clk_hw, + init.name, +diff --git a/sound/soc/codecs/rt5682s.c b/sound/soc/codecs/rt5682s.c +index 68ac5ea50396d5..92c647d439ec7f 100644 +--- a/sound/soc/codecs/rt5682s.c ++++ b/sound/soc/codecs/rt5682s.c +@@ -2828,7 +2828,9 @@ static int rt5682s_register_dai_clks(struct snd_soc_component *component) + } + + if (dev->of_node) { +- devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, dai_clk_hw); ++ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, dai_clk_hw); ++ if (ret) ++ return ret; + } else { + ret = devm_clk_hw_register_clkdev(dev, dai_clk_hw, + init.name, dev_name(dev)); +diff --git a/sound/soc/codecs/tas2781-comlib.c b/sound/soc/codecs/tas2781-comlib.c +index 5d0e5348b361a5..0444cf90c5119f 100644 +--- a/sound/soc/codecs/tas2781-comlib.c ++++ b/sound/soc/codecs/tas2781-comlib.c +@@ -14,7 +14,6 @@ + #include + #include + #include +-#include + #include + #include + #include +@@ -406,9 +405,6 @@ EXPORT_SYMBOL_GPL(tasdevice_dsp_remove); + + void 
tasdevice_remove(struct tasdevice_priv *tas_priv) + { +- if (gpio_is_valid(tas_priv->irq_info.irq_gpio)) +- gpio_free(tas_priv->irq_info.irq_gpio); +- kfree(tas_priv->acpi_subsystem_id); + mutex_destroy(&tas_priv->codec_lock); + } + EXPORT_SYMBOL_GPL(tasdevice_remove); +diff --git a/sound/soc/codecs/tas2781-fmwlib.c b/sound/soc/codecs/tas2781-fmwlib.c +index 3639dcd0bbb2bc..629e2195a890b2 100644 +--- a/sound/soc/codecs/tas2781-fmwlib.c ++++ b/sound/soc/codecs/tas2781-fmwlib.c +@@ -13,7 +13,6 @@ + #include + #include + #include +-#include + #include + #include + #include +diff --git a/sound/soc/codecs/tas2781-i2c.c b/sound/soc/codecs/tas2781-i2c.c +index a9d179e3077398..43775c19444525 100644 +--- a/sound/soc/codecs/tas2781-i2c.c ++++ b/sound/soc/codecs/tas2781-i2c.c +@@ -21,7 +21,7 @@ + #include + #include + #include +-#include ++#include + #include + #include + #include +@@ -616,7 +616,7 @@ static void tasdevice_parse_dt(struct tasdevice_priv *tas_priv) + { + struct i2c_client *client = (struct i2c_client *)tas_priv->client; + unsigned int dev_addrs[TASDEVICE_MAX_CHANNELS]; +- int rc, i, ndev = 0; ++ int i, ndev = 0; + + if (tas_priv->isacpi) { + ndev = device_property_read_u32_array(&client->dev, +@@ -631,64 +631,34 @@ static void tasdevice_parse_dt(struct tasdevice_priv *tas_priv) + "ti,audio-slots", dev_addrs, ndev); + } + +- tas_priv->irq_info.irq_gpio = ++ tas_priv->irq = + acpi_dev_gpio_irq_get(ACPI_COMPANION(&client->dev), 0); +- } else { ++ } else if (IS_ENABLED(CONFIG_OF)) { + struct device_node *np = tas_priv->dev->of_node; +-#ifdef CONFIG_OF +- const __be32 *reg, *reg_end; +- int len, sw, aw; +- +- aw = of_n_addr_cells(np); +- sw = of_n_size_cells(np); +- if (sw == 0) { +- reg = (const __be32 *)of_get_property(np, +- "reg", &len); +- reg_end = reg + len/sizeof(*reg); +- ndev = 0; +- do { +- dev_addrs[ndev] = of_read_number(reg, aw); +- reg += aw; +- ndev++; +- } while (reg < reg_end); +- } else { +- ndev = 1; +- dev_addrs[0] = client->addr; ++ u64 addr; ++ ++ for (i = 0; i < TASDEVICE_MAX_CHANNELS; i++) { ++ if (of_property_read_reg(np, i, &addr, NULL)) ++ break; ++ dev_addrs[ndev++] = addr; + } +-#else ++ ++ tas_priv->irq = of_irq_get(np, 0); ++ } else { + ndev = 1; + dev_addrs[0] = client->addr; +-#endif +- tas_priv->irq_info.irq_gpio = of_irq_get(np, 0); + } + tas_priv->ndev = ndev; + for (i = 0; i < ndev; i++) + tas_priv->tasdevice[i].dev_addr = dev_addrs[i]; + + tas_priv->reset = devm_gpiod_get_optional(&client->dev, +- "reset-gpios", GPIOD_OUT_HIGH); ++ "reset", GPIOD_OUT_HIGH); + if (IS_ERR(tas_priv->reset)) + dev_err(tas_priv->dev, "%s Can't get reset GPIO\n", + __func__); + + strcpy(tas_priv->dev_name, tasdevice_id[tas_priv->chip_id].name); +- +- if (gpio_is_valid(tas_priv->irq_info.irq_gpio)) { +- rc = gpio_request(tas_priv->irq_info.irq_gpio, +- "AUDEV-IRQ"); +- if (!rc) { +- gpio_direction_input( +- tas_priv->irq_info.irq_gpio); +- +- tas_priv->irq_info.irq = +- gpio_to_irq(tas_priv->irq_info.irq_gpio); +- } else +- dev_err(tas_priv->dev, "%s: GPIO %d request error\n", +- __func__, tas_priv->irq_info.irq_gpio); +- } else +- dev_err(tas_priv->dev, +- "Looking up irq-gpio property failed %d\n", +- tas_priv->irq_info.irq_gpio); + } + + static int tasdevice_i2c_probe(struct i2c_client *i2c) +diff --git a/sound/soc/loongson/loongson_card.c b/sound/soc/loongson/loongson_card.c +index 406ee8db1a3c5f..8cc54aedd00242 100644 +--- a/sound/soc/loongson/loongson_card.c ++++ b/sound/soc/loongson/loongson_card.c +@@ -127,8 +127,8 @@ static int loongson_card_parse_of(struct 
loongson_card_data *data) + codec = of_get_child_by_name(dev->of_node, "codec"); + if (!codec) { + dev_err(dev, "audio-codec property missing or invalid\n"); +- ret = -EINVAL; +- goto err; ++ of_node_put(cpu); ++ return -EINVAL; + } + + for (i = 0; i < card->num_links; i++) { +diff --git a/tools/bpf/runqslower/Makefile b/tools/bpf/runqslower/Makefile +index d8288936c9120f..c4f1f1735af659 100644 +--- a/tools/bpf/runqslower/Makefile ++++ b/tools/bpf/runqslower/Makefile +@@ -15,6 +15,7 @@ INCLUDES := -I$(OUTPUT) -I$(BPF_INCLUDE) -I$(abspath ../../include/uapi) + CFLAGS := -g -Wall $(CLANG_CROSS_FLAGS) + CFLAGS += $(EXTRA_CFLAGS) + LDFLAGS += $(EXTRA_LDFLAGS) ++LDLIBS += -lelf -lz + + # Try to detect best kernel BTF source + KERNEL_REL := $(shell uname -r) +@@ -51,7 +52,7 @@ clean: + libbpf_hdrs: $(BPFOBJ) + + $(OUTPUT)/runqslower: $(OUTPUT)/runqslower.o $(BPFOBJ) +- $(QUIET_LINK)$(CC) $(CFLAGS) $^ -lelf -lz -o $@ ++ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $^ $(LDLIBS) -o $@ + + $(OUTPUT)/runqslower.o: runqslower.h $(OUTPUT)/runqslower.skel.h \ + $(OUTPUT)/runqslower.bpf.o | libbpf_hdrs +diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c +index 92973420c0a5a8..0f1e5787b4edac 100644 +--- a/tools/perf/builtin-annotate.c ++++ b/tools/perf/builtin-annotate.c +@@ -378,7 +378,7 @@ static void hists__find_annotations(struct hists *hists, + /* skip missing symbols */ + nd = rb_next(nd); + } else if (use_browser == 1) { +- key = hist_entry__tui_annotate(he, evsel, NULL, &annotate_opts); ++ key = hist_entry__tui_annotate(he, evsel, NULL); + + switch (key) { + case -1: +diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c +index eb3ef5c24b6625..8aba0566546797 100644 +--- a/tools/perf/builtin-inject.c ++++ b/tools/perf/builtin-inject.c +@@ -2200,6 +2200,7 @@ int cmd_inject(int argc, const char **argv) + .finished_init = perf_event__repipe_op2_synth, + .compressed = perf_event__repipe_op4_synth, + .auxtrace = perf_event__repipe_auxtrace, ++ .dont_split_sample_group = true, + }, + .input_name = "-", + .samples = LIST_HEAD_INIT(inject.samples), +diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c +index 51499c20da01e8..865f321d729b69 100644 +--- a/tools/perf/builtin-mem.c ++++ b/tools/perf/builtin-mem.c +@@ -372,6 +372,7 @@ static int report_events(int argc, const char **argv, struct perf_mem *mem) + rep_argv[i] = argv[j]; + + ret = cmd_report(i, rep_argv); ++ free(new_sort_order); + free(rep_argv); + return ret; + } +diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c +index 2a8889c6d7f978..cd2f3f1a756330 100644 +--- a/tools/perf/builtin-report.c ++++ b/tools/perf/builtin-report.c +@@ -540,8 +540,7 @@ static int evlist__tui_block_hists_browse(struct evlist *evlist, struct report * + evlist__for_each_entry(evlist, pos) { + ret = report__browse_block_hists(&rep->block_reports[i++].hist, + rep->min_percent, pos, +- &rep->session->header.env, +- &annotate_opts); ++ &rep->session->header.env); + if (ret != 0) + return ret; + } +@@ -563,6 +562,7 @@ static int evlist__tty_browse_hists(struct evlist *evlist, struct report *rep, c + struct hists *hists = evsel__hists(pos); + const char *evname = evsel__name(pos); + ++ i++; + if (symbol_conf.event_group && !evsel__is_group_leader(pos)) + continue; + +@@ -572,9 +572,8 @@ static int evlist__tty_browse_hists(struct evlist *evlist, struct report *rep, c + hists__fprintf_nr_sample_events(hists, rep, evname, stdout); + + if (rep->total_cycles_mode) { +- 
report__browse_block_hists(&rep->block_reports[i++].hist, +- rep->min_percent, pos, +- NULL, NULL); ++ report__browse_block_hists(&rep->block_reports[i - 1].hist, ++ rep->min_percent, pos, NULL); + continue; + } + +@@ -669,7 +668,7 @@ static int report__browse_hists(struct report *rep) + } + + ret = evlist__tui_browse_hists(evlist, help, NULL, rep->min_percent, +- &session->header.env, true, &annotate_opts); ++ &session->header.env, true); + /* + * Usually "ret" is the last pressed key, and we only + * care if the key notifies us to switch data file. +diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c +index f21a655dd7f95e..42185da0f000a0 100644 +--- a/tools/perf/builtin-sched.c ++++ b/tools/perf/builtin-sched.c +@@ -2633,9 +2633,12 @@ static int timehist_sched_change_event(struct perf_tool *tool, + * - previous sched event is out of window - we are done + * - sample time is beyond window user cares about - reset it + * to close out stats for time window interest ++ * - If tprev is 0, that is, sched_in event for current task is ++ * not recorded, cannot determine whether sched_in event is ++ * within time window interest - ignore it + */ + if (ptime->end) { +- if (tprev > ptime->end) ++ if (!tprev || tprev > ptime->end) + goto out; + + if (t > ptime->end) +@@ -3068,7 +3071,8 @@ static int perf_sched__timehist(struct perf_sched *sched) + + if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) { + pr_err("Invalid time string\n"); +- return -EINVAL; ++ err = -EINVAL; ++ goto out; + } + + if (timehist_check_attr(sched, evlist) != 0) +diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c +index 6ac17763de0e00..1c1ec444d501ee 100644 +--- a/tools/perf/builtin-top.c ++++ b/tools/perf/builtin-top.c +@@ -646,8 +646,7 @@ static void *display_thread_tui(void *arg) + } + + ret = evlist__tui_browse_hists(top->evlist, help, &hbt, top->min_percent, +- &top->session->header.env, !top->record_opts.overwrite, +- &annotate_opts); ++ &top->session->header.env, !top->record_opts.overwrite); + if (ret == K_RELOAD) { + top->zero = true; + goto repeat; +diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c +index d9f9fa254a71f5..20f24d104da8e5 100644 +--- a/tools/perf/ui/browsers/annotate.c ++++ b/tools/perf/ui/browsers/annotate.c +@@ -27,7 +27,6 @@ struct annotate_browser { + struct rb_node *curr_hot; + struct annotation_line *selection; + struct arch *arch; +- struct annotation_options *opts; + bool searching_backwards; + char search_bf[128]; + }; +@@ -97,7 +96,7 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int + struct annotation_write_ops ops = { + .first_line = row == 0, + .current_entry = is_current_entry, +- .change_color = (!notes->options->hide_src_code && ++ .change_color = (!annotate_opts.hide_src_code && + (!is_current_entry || + (browser->use_navkeypressed && + !browser->navkeypressed))), +@@ -128,7 +127,7 @@ static int is_fused(struct annotate_browser *ab, struct disasm_line *cursor) + + while (pos && pos->al.offset == -1) { + pos = list_prev_entry(pos, al.node); +- if (!ab->opts->hide_src_code) ++ if (!annotate_opts.hide_src_code) + diff++; + } + +@@ -195,7 +194,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser) + return; + } + +- if (notes->options->hide_src_code) { ++ if (annotate_opts.hide_src_code) { + from = cursor->al.idx_asm; + to = target->idx_asm; + } else { +@@ -224,7 +223,7 @@ static unsigned int annotate_browser__refresh(struct ui_browser *browser) + int ret 
= ui_browser__list_head_refresh(browser); + int pcnt_width = annotation__pcnt_width(notes); + +- if (notes->options->jump_arrows) ++ if (annotate_opts.jump_arrows) + annotate_browser__draw_current_jump(browser); + + ui_browser__set_color(browser, HE_COLORSET_NORMAL); +@@ -258,7 +257,7 @@ static void disasm_rb_tree__insert(struct annotate_browser *browser, + parent = *p; + l = rb_entry(parent, struct annotation_line, rb_node); + +- if (disasm__cmp(al, l, browser->opts->percent_type) < 0) ++ if (disasm__cmp(al, l, annotate_opts.percent_type) < 0) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; +@@ -294,11 +293,10 @@ static void annotate_browser__set_top(struct annotate_browser *browser, + static void annotate_browser__set_rb_top(struct annotate_browser *browser, + struct rb_node *nd) + { +- struct annotation *notes = browser__annotation(&browser->b); + struct annotation_line * pos = rb_entry(nd, struct annotation_line, rb_node); + u32 idx = pos->idx; + +- if (notes->options->hide_src_code) ++ if (annotate_opts.hide_src_code) + idx = pos->idx_asm; + annotate_browser__set_top(browser, pos, idx); + browser->curr_hot = nd; +@@ -331,7 +329,7 @@ static void annotate_browser__calc_percent(struct annotate_browser *browser, + double percent; + + percent = annotation_data__percent(&pos->al.data[i], +- browser->opts->percent_type); ++ annotate_opts.percent_type); + + if (max_percent < percent) + max_percent = percent; +@@ -380,12 +378,12 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser) + browser->b.seek(&browser->b, offset, SEEK_CUR); + al = list_entry(browser->b.top, struct annotation_line, node); + +- if (notes->options->hide_src_code) { ++ if (annotate_opts.hide_src_code) { + if (al->idx_asm < offset) + offset = al->idx; + +- browser->b.nr_entries = notes->nr_entries; +- notes->options->hide_src_code = false; ++ browser->b.nr_entries = notes->src->nr_entries; ++ annotate_opts.hide_src_code = false; + browser->b.seek(&browser->b, -offset, SEEK_CUR); + browser->b.top_idx = al->idx - offset; + browser->b.index = al->idx; +@@ -402,8 +400,8 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser) + if (al->idx_asm < offset) + offset = al->idx_asm; + +- browser->b.nr_entries = notes->nr_asm_entries; +- notes->options->hide_src_code = true; ++ browser->b.nr_entries = notes->src->nr_asm_entries; ++ annotate_opts.hide_src_code = true; + browser->b.seek(&browser->b, -offset, SEEK_CUR); + browser->b.top_idx = al->idx_asm - offset; + browser->b.index = al->idx_asm; +@@ -435,7 +433,7 @@ static void ui_browser__init_asm_mode(struct ui_browser *browser) + { + struct annotation *notes = browser__annotation(browser); + ui_browser__reset_index(browser); +- browser->nr_entries = notes->nr_asm_entries; ++ browser->nr_entries = notes->src->nr_asm_entries; + } + + static int sym_title(struct symbol *sym, struct map *map, char *title, +@@ -483,8 +481,8 @@ static bool annotate_browser__callq(struct annotate_browser *browser, + target_ms.map = ms->map; + target_ms.sym = dl->ops.target.sym; + annotation__unlock(notes); +- symbol__tui_annotate(&target_ms, evsel, hbt, browser->opts); +- sym_title(ms->sym, ms->map, title, sizeof(title), browser->opts->percent_type); ++ symbol__tui_annotate(&target_ms, evsel, hbt); ++ sym_title(ms->sym, ms->map, title, sizeof(title), annotate_opts.percent_type); + ui_browser__show_title(&browser->b, title); + return true; + } +@@ -659,7 +657,6 @@ bool annotate_browser__continue_search_reverse(struct annotate_browser *browser, + + static 
int annotate_browser__show(struct ui_browser *browser, char *title, const char *help) + { +- struct annotate_browser *ab = container_of(browser, struct annotate_browser, b); + struct map_symbol *ms = browser->priv; + struct symbol *sym = ms->sym; + char symbol_dso[SYM_TITLE_MAX_SIZE]; +@@ -667,7 +664,7 @@ static int annotate_browser__show(struct ui_browser *browser, char *title, const + if (ui_browser__show(browser, title, help) < 0) + return -1; + +- sym_title(sym, ms->map, symbol_dso, sizeof(symbol_dso), ab->opts->percent_type); ++ sym_title(sym, ms->map, symbol_dso, sizeof(symbol_dso), annotate_opts.percent_type); + + ui_browser__gotorc_title(browser, 0, 0); + ui_browser__set_color(browser, HE_COLORSET_ROOT); +@@ -809,7 +806,7 @@ static int annotate_browser__run(struct annotate_browser *browser, + annotate_browser__show(&browser->b, title, help); + continue; + case 'k': +- notes->options->show_linenr = !notes->options->show_linenr; ++ annotate_opts.show_linenr = !annotate_opts.show_linenr; + continue; + case 'l': + annotate_browser__show_full_location (&browser->b); +@@ -822,18 +819,18 @@ static int annotate_browser__run(struct annotate_browser *browser, + ui_helpline__puts(help); + continue; + case 'o': +- notes->options->use_offset = !notes->options->use_offset; ++ annotate_opts.use_offset = !annotate_opts.use_offset; + annotation__update_column_widths(notes); + continue; + case 'O': +- if (++notes->options->offset_level > ANNOTATION__MAX_OFFSET_LEVEL) +- notes->options->offset_level = ANNOTATION__MIN_OFFSET_LEVEL; ++ if (++annotate_opts.offset_level > ANNOTATION__MAX_OFFSET_LEVEL) ++ annotate_opts.offset_level = ANNOTATION__MIN_OFFSET_LEVEL; + continue; + case 'j': +- notes->options->jump_arrows = !notes->options->jump_arrows; ++ annotate_opts.jump_arrows = !annotate_opts.jump_arrows; + continue; + case 'J': +- notes->options->show_nr_jumps = !notes->options->show_nr_jumps; ++ annotate_opts.show_nr_jumps = !annotate_opts.show_nr_jumps; + annotation__update_column_widths(notes); + continue; + case '/': +@@ -860,7 +857,7 @@ static int annotate_browser__run(struct annotate_browser *browser, + browser->b.height, + browser->b.index, + browser->b.top_idx, +- notes->nr_asm_entries); ++ notes->src->nr_asm_entries); + } + continue; + case K_ENTER: +@@ -897,15 +894,15 @@ static int annotate_browser__run(struct annotate_browser *browser, + annotation__update_column_widths(notes); + continue; + case 'c': +- if (notes->options->show_minmax_cycle) +- notes->options->show_minmax_cycle = false; ++ if (annotate_opts.show_minmax_cycle) ++ annotate_opts.show_minmax_cycle = false; + else +- notes->options->show_minmax_cycle = true; ++ annotate_opts.show_minmax_cycle = true; + annotation__update_column_widths(notes); + continue; + case 'p': + case 'b': +- switch_percent_type(browser->opts, key == 'b'); ++ switch_percent_type(&annotate_opts, key == 'b'); + hists__scnprintf_title(hists, title, sizeof(title)); + annotate_browser__show(&browser->b, title, help); + continue; +@@ -932,26 +929,23 @@ static int annotate_browser__run(struct annotate_browser *browser, + } + + int map_symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel, +- struct hist_browser_timer *hbt, +- struct annotation_options *opts) ++ struct hist_browser_timer *hbt) + { +- return symbol__tui_annotate(ms, evsel, hbt, opts); ++ return symbol__tui_annotate(ms, evsel, hbt); + } + + int hist_entry__tui_annotate(struct hist_entry *he, struct evsel *evsel, +- struct hist_browser_timer *hbt, +- struct annotation_options *opts) ++ struct 
hist_browser_timer *hbt) + { + /* reset abort key so that it can get Ctrl-C as a key */ + SLang_reset_tty(); + SLang_init_tty(0, 0, 0); + +- return map_symbol__tui_annotate(&he->ms, evsel, hbt, opts); ++ return map_symbol__tui_annotate(&he->ms, evsel, hbt); + } + + int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel, +- struct hist_browser_timer *hbt, +- struct annotation_options *opts) ++ struct hist_browser_timer *hbt) + { + struct symbol *sym = ms->sym; + struct annotation *notes = symbol__annotation(sym); +@@ -965,7 +959,6 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel, + .priv = ms, + .use_navkeypressed = true, + }, +- .opts = opts, + }; + struct dso *dso; + int ret = -1, err; +@@ -991,12 +984,12 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel, + + ui_helpline__push("Press ESC to exit"); + +- browser.b.width = notes->max_line_len; +- browser.b.nr_entries = notes->nr_entries; ++ browser.b.width = notes->src->max_line_len; ++ browser.b.nr_entries = notes->src->nr_entries; + browser.b.entries = ¬es->src->source, + browser.b.width += 18; /* Percentage */ + +- if (notes->options->hide_src_code) ++ if (annotate_opts.hide_src_code) + ui_browser__init_asm_mode(&browser.b); + + ret = annotate_browser__run(&browser, evsel, hbt); +diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c +index 70db5a71790569..bb59d27642ccf2 100644 +--- a/tools/perf/ui/browsers/hists.c ++++ b/tools/perf/ui/browsers/hists.c +@@ -2250,8 +2250,7 @@ struct hist_browser *hist_browser__new(struct hists *hists) + static struct hist_browser * + perf_evsel_browser__new(struct evsel *evsel, + struct hist_browser_timer *hbt, +- struct perf_env *env, +- struct annotation_options *annotation_opts) ++ struct perf_env *env) + { + struct hist_browser *browser = hist_browser__new(evsel__hists(evsel)); + +@@ -2259,7 +2258,6 @@ perf_evsel_browser__new(struct evsel *evsel, + browser->hbt = hbt; + browser->env = env; + browser->title = hists_browser__scnprintf_title; +- browser->annotation_opts = annotation_opts; + } + return browser; + } +@@ -2432,8 +2430,8 @@ do_annotate(struct hist_browser *browser, struct popup_action *act) + struct hist_entry *he; + int err; + +- if (!browser->annotation_opts->objdump_path && +- perf_env__lookup_objdump(browser->env, &browser->annotation_opts->objdump_path)) ++ if (!annotate_opts.objdump_path && ++ perf_env__lookup_objdump(browser->env, &annotate_opts.objdump_path)) + return 0; + + notes = symbol__annotation(act->ms.sym); +@@ -2445,8 +2443,7 @@ do_annotate(struct hist_browser *browser, struct popup_action *act) + else + evsel = hists_to_evsel(browser->hists); + +- err = map_symbol__tui_annotate(&act->ms, evsel, browser->hbt, +- browser->annotation_opts); ++ err = map_symbol__tui_annotate(&act->ms, evsel, browser->hbt); + he = hist_browser__selected_entry(browser); + /* + * offer option to annotate the other branch source or target +@@ -2943,11 +2940,10 @@ static void hist_browser__update_percent_limit(struct hist_browser *hb, + + static int evsel__hists_browse(struct evsel *evsel, int nr_events, const char *helpline, + bool left_exits, struct hist_browser_timer *hbt, float min_pcnt, +- struct perf_env *env, bool warn_lost_event, +- struct annotation_options *annotation_opts) ++ struct perf_env *env, bool warn_lost_event) + { + struct hists *hists = evsel__hists(evsel); +- struct hist_browser *browser = perf_evsel_browser__new(evsel, hbt, env, annotation_opts); ++ struct hist_browser *browser = 
perf_evsel_browser__new(evsel, hbt, env); + struct branch_info *bi = NULL; + #define MAX_OPTIONS 16 + char *options[MAX_OPTIONS]; +@@ -3398,7 +3394,6 @@ static int evsel__hists_browse(struct evsel *evsel, int nr_events, const char *h + struct evsel_menu { + struct ui_browser b; + struct evsel *selection; +- struct annotation_options *annotation_opts; + bool lost_events, lost_events_warned; + float min_pcnt; + struct perf_env *env; +@@ -3499,8 +3494,7 @@ static int perf_evsel_menu__run(struct evsel_menu *menu, + hbt->timer(hbt->arg); + key = evsel__hists_browse(pos, nr_events, help, true, hbt, + menu->min_pcnt, menu->env, +- warn_lost_event, +- menu->annotation_opts); ++ warn_lost_event); + ui_browser__show_title(&menu->b, title); + switch (key) { + case K_TAB: +@@ -3557,7 +3551,7 @@ static bool filter_group_entries(struct ui_browser *browser __maybe_unused, + + static int __evlist__tui_browse_hists(struct evlist *evlist, int nr_entries, const char *help, + struct hist_browser_timer *hbt, float min_pcnt, struct perf_env *env, +- bool warn_lost_event, struct annotation_options *annotation_opts) ++ bool warn_lost_event) + { + struct evsel *pos; + struct evsel_menu menu = { +@@ -3572,7 +3566,6 @@ static int __evlist__tui_browse_hists(struct evlist *evlist, int nr_entries, con + }, + .min_pcnt = min_pcnt, + .env = env, +- .annotation_opts = annotation_opts, + }; + + ui_helpline__push("Press ESC to exit"); +@@ -3607,8 +3600,7 @@ static bool evlist__single_entry(struct evlist *evlist) + } + + int evlist__tui_browse_hists(struct evlist *evlist, const char *help, struct hist_browser_timer *hbt, +- float min_pcnt, struct perf_env *env, bool warn_lost_event, +- struct annotation_options *annotation_opts) ++ float min_pcnt, struct perf_env *env, bool warn_lost_event) + { + int nr_entries = evlist->core.nr_entries; + +@@ -3617,7 +3609,7 @@ single_entry: { + struct evsel *first = evlist__first(evlist); + + return evsel__hists_browse(first, nr_entries, help, false, hbt, min_pcnt, +- env, warn_lost_event, annotation_opts); ++ env, warn_lost_event); + } + } + +@@ -3635,7 +3627,7 @@ single_entry: { + } + + return __evlist__tui_browse_hists(evlist, nr_entries, help, hbt, min_pcnt, env, +- warn_lost_event, annotation_opts); ++ warn_lost_event); + } + + static int block_hists_browser__title(struct hist_browser *browser, char *bf, +@@ -3654,8 +3646,7 @@ static int block_hists_browser__title(struct hist_browser *browser, char *bf, + } + + int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel, +- float min_percent, struct perf_env *env, +- struct annotation_options *annotation_opts) ++ float min_percent, struct perf_env *env) + { + struct hists *hists = &bh->block_hists; + struct hist_browser *browser; +@@ -3672,7 +3663,6 @@ int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel, + browser->title = block_hists_browser__title; + browser->min_pcnt = min_percent; + browser->env = env; +- browser->annotation_opts = annotation_opts; + + /* reset abort key so that it can get Ctrl-C as a key */ + SLang_reset_tty(); +diff --git a/tools/perf/ui/browsers/hists.h b/tools/perf/ui/browsers/hists.h +index 1e938d9ffa5ee2..de46f6c56b0ef0 100644 +--- a/tools/perf/ui/browsers/hists.h ++++ b/tools/perf/ui/browsers/hists.h +@@ -4,7 +4,6 @@ + + #include "ui/browser.h" + +-struct annotation_options; + struct evsel; + + struct hist_browser { +@@ -15,7 +14,6 @@ struct hist_browser { + struct hist_browser_timer *hbt; + struct pstack *pstack; + struct perf_env *env; +- struct annotation_options 
*annotation_opts;
+ struct evsel *block_evsel;
+ int print_seq;
+ bool show_dso;
+diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
+index 83da2bceb59594..6dfe11cbf30e2b 100644
+--- a/tools/perf/util/annotate.c
++++ b/tools/perf/util/annotate.c
+@@ -813,7 +813,6 @@ static __maybe_unused void annotated_source__delete(struct annotated_source *src
+ if (src == NULL)
+ return;
+ zfree(&src->histograms);
+- zfree(&src->cycles_hist);
+ free(src);
+ }
+
+@@ -848,18 +847,6 @@ static int annotated_source__alloc_histograms(struct annotated_source *src,
+ return src->histograms ? 0 : -1;
+ }
+
+-/* The cycles histogram is lazily allocated. */
+-static int symbol__alloc_hist_cycles(struct symbol *sym)
+-{
+- struct annotation *notes = symbol__annotation(sym);
+- const size_t size = symbol__size(sym);
+-
+- notes->src->cycles_hist = calloc(size, sizeof(struct cyc_hist));
+- if (notes->src->cycles_hist == NULL)
+- return -1;
+- return 0;
+-}
+-
+ void symbol__annotate_zero_histograms(struct symbol *sym)
+ {
+ struct annotation *notes = symbol__annotation(sym);
+@@ -868,9 +855,10 @@ void symbol__annotate_zero_histograms(struct symbol *sym)
+ if (notes->src != NULL) {
+ memset(notes->src->histograms, 0,
+ notes->src->nr_histograms * notes->src->sizeof_sym_hist);
+- if (notes->src->cycles_hist)
+- memset(notes->src->cycles_hist, 0,
+- symbol__size(sym) * sizeof(struct cyc_hist));
++ }
++ if (notes->branch && notes->branch->cycles_hist) {
++ memset(notes->branch->cycles_hist, 0,
++ symbol__size(sym) * sizeof(struct cyc_hist));
+ }
+ annotation__unlock(notes);
+ }
+@@ -961,23 +949,33 @@ static int __symbol__inc_addr_samples(struct map_symbol *ms,
+ return 0;
+ }
+
++static struct annotated_branch *annotation__get_branch(struct annotation *notes)
++{
++ if (notes == NULL)
++ return NULL;
++
++ if (notes->branch == NULL)
++ notes->branch = zalloc(sizeof(*notes->branch));
++
++ return notes->branch;
++}
++
+ static struct cyc_hist *symbol__cycles_hist(struct symbol *sym)
+ {
+ struct annotation *notes = symbol__annotation(sym);
++ struct annotated_branch *branch;
+
+- if (notes->src == NULL) {
+- notes->src = annotated_source__new();
+- if (notes->src == NULL)
+- return NULL;
+- goto alloc_cycles_hist;
+- }
++ branch = annotation__get_branch(notes);
++ if (branch == NULL)
++ return NULL;
+
+- if (!notes->src->cycles_hist) {
+-alloc_cycles_hist:
+- symbol__alloc_hist_cycles(sym);
++ if (branch->cycles_hist == NULL) {
++ const size_t size = symbol__size(sym);
++
++ branch->cycles_hist = calloc(size, sizeof(struct cyc_hist));
+ }
+
+- return notes->src->cycles_hist;
++ return branch->cycles_hist;
+ }
+
+ struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists)
+@@ -1086,6 +1084,14 @@ static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64
+ return n_insn;
+ }
+
++static void annotated_branch__delete(struct annotated_branch *branch)
++{
++ if (branch) {
++ zfree(&branch->cycles_hist);
++ free(branch);
++ }
++}
++
+ static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 end, struct cyc_hist *ch)
+ {
+ unsigned n_insn;
+@@ -1094,6 +1100,7 @@ static void annotation__count_and_fill(struct annotation *notes, u64 start, u64
+
+ n_insn = annotation__count_insn(notes, start, end);
+ if (n_insn && ch->num && ch->cycles) {
++ struct annotated_branch *branch;
+ float ipc = n_insn / ((double)ch->cycles / (double)ch->num);
+
+ /* Hide data when there are too many overlaps.
*/ +@@ -1109,10 +1116,11 @@ static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 + } + } + +- if (cover_insn) { +- notes->hit_cycles += ch->cycles; +- notes->hit_insn += n_insn * ch->num; +- notes->cover_insn += cover_insn; ++ branch = annotation__get_branch(notes); ++ if (cover_insn && branch) { ++ branch->hit_cycles += ch->cycles; ++ branch->hit_insn += n_insn * ch->num; ++ branch->cover_insn += cover_insn; + } + } + } +@@ -1122,19 +1130,19 @@ static int annotation__compute_ipc(struct annotation *notes, size_t size) + int err = 0; + s64 offset; + +- if (!notes->src || !notes->src->cycles_hist) ++ if (!notes->branch || !notes->branch->cycles_hist) + return 0; + +- notes->total_insn = annotation__count_insn(notes, 0, size - 1); +- notes->hit_cycles = 0; +- notes->hit_insn = 0; +- notes->cover_insn = 0; ++ notes->branch->total_insn = annotation__count_insn(notes, 0, size - 1); ++ notes->branch->hit_cycles = 0; ++ notes->branch->hit_insn = 0; ++ notes->branch->cover_insn = 0; + + annotation__lock(notes); + for (offset = size - 1; offset >= 0; --offset) { + struct cyc_hist *ch; + +- ch = ¬es->src->cycles_hist[offset]; ++ ch = ¬es->branch->cycles_hist[offset]; + if (ch && ch->cycles) { + struct annotation_line *al; + +@@ -1153,13 +1161,12 @@ static int annotation__compute_ipc(struct annotation *notes, size_t size) + al->cycles->max = ch->cycles_max; + al->cycles->min = ch->cycles_min; + } +- notes->have_cycles = true; + } + } + + if (err) { + while (++offset < (s64)size) { +- struct cyc_hist *ch = ¬es->src->cycles_hist[offset]; ++ struct cyc_hist *ch = ¬es->branch->cycles_hist[offset]; + + if (ch && ch->cycles) { + struct annotation_line *al = notes->offsets[offset]; +@@ -1325,6 +1332,7 @@ int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool r + void annotation__exit(struct annotation *notes) + { + annotated_source__delete(notes->src); ++ annotated_branch__delete(notes->branch); + } + + static struct sharded_mutex *sharded_mutex; +@@ -2817,19 +2825,20 @@ void annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym) + void annotation__set_offsets(struct annotation *notes, s64 size) + { + struct annotation_line *al; ++ struct annotated_source *src = notes->src; + +- notes->max_line_len = 0; +- notes->nr_entries = 0; +- notes->nr_asm_entries = 0; ++ src->max_line_len = 0; ++ src->nr_entries = 0; ++ src->nr_asm_entries = 0; + +- list_for_each_entry(al, ¬es->src->source, node) { ++ list_for_each_entry(al, &src->source, node) { + size_t line_len = strlen(al->line); + +- if (notes->max_line_len < line_len) +- notes->max_line_len = line_len; +- al->idx = notes->nr_entries++; ++ if (src->max_line_len < line_len) ++ src->max_line_len = line_len; ++ al->idx = src->nr_entries++; + if (al->offset != -1) { +- al->idx_asm = notes->nr_asm_entries++; ++ al->idx_asm = src->nr_asm_entries++; + /* + * FIXME: short term bandaid to cope with assembly + * routines that comes with labels in the same column +@@ -3074,13 +3083,14 @@ static void disasm_line__write(struct disasm_line *dl, struct annotation *notes, + static void ipc_coverage_string(char *bf, int size, struct annotation *notes) + { + double ipc = 0.0, coverage = 0.0; ++ struct annotated_branch *branch = annotation__get_branch(notes); + +- if (notes->hit_cycles) +- ipc = notes->hit_insn / ((double)notes->hit_cycles); ++ if (branch && branch->hit_cycles) ++ ipc = branch->hit_insn / ((double)branch->hit_cycles); + +- if (notes->total_insn) { +- coverage = notes->cover_insn * 100.0 / +- 
((double)notes->total_insn); ++ if (branch && branch->total_insn) { ++ coverage = branch->cover_insn * 100.0 / ++ ((double)branch->total_insn); + } + + scnprintf(bf, size, "(Average IPC: %.2f, IPC Coverage: %.1f%%)", +@@ -3105,7 +3115,7 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati + int printed; + + if (first_line && (al->offset == -1 || percent_max == 0.0)) { +- if (notes->have_cycles && al->cycles) { ++ if (notes->branch && al->cycles) { + if (al->cycles->ipc == 0.0 && al->cycles->avg == 0) + show_title = true; + } else +@@ -3142,7 +3152,7 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati + } + } + +- if (notes->have_cycles) { ++ if (notes->branch) { + if (al->cycles && al->cycles->ipc) + obj__printf(obj, "%*.2f ", ANNOTATION__IPC_WIDTH - 1, al->cycles->ipc); + else if (!show_title) +diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h +index 0fa72eb559ac41..b79614c44a2424 100644 +--- a/tools/perf/util/annotate.h ++++ b/tools/perf/util/annotate.h +@@ -269,27 +269,30 @@ struct cyc_hist { + * returns. + */ + struct annotated_source { +- struct list_head source; +- int nr_histograms; +- size_t sizeof_sym_hist; +- struct cyc_hist *cycles_hist; +- struct sym_hist *histograms; ++ struct list_head source; ++ size_t sizeof_sym_hist; ++ struct sym_hist *histograms; ++ int nr_histograms; ++ int nr_entries; ++ int nr_asm_entries; ++ u16 max_line_len; + }; + +-struct LOCKABLE annotation { +- u64 max_coverage; +- u64 start; ++struct annotated_branch { + u64 hit_cycles; + u64 hit_insn; + unsigned int total_insn; + unsigned int cover_insn; ++ struct cyc_hist *cycles_hist; ++}; ++ ++struct LOCKABLE annotation { ++ u64 max_coverage; ++ u64 start; + struct annotation_options *options; + struct annotation_line **offsets; + int nr_events; + int max_jump_sources; +- int nr_entries; +- int nr_asm_entries; +- u16 max_line_len; + struct { + u8 addr; + u8 jumps; +@@ -298,8 +301,8 @@ struct LOCKABLE annotation { + u8 max_addr; + u8 max_ins_name; + } widths; +- bool have_cycles; + struct annotated_source *src; ++ struct annotated_branch *branch; + }; + + static inline void annotation__init(struct annotation *notes __maybe_unused) +@@ -313,10 +316,10 @@ bool annotation__trylock(struct annotation *notes) EXCLUSIVE_TRYLOCK_FUNCTION(tr + + static inline int annotation__cycles_width(struct annotation *notes) + { +- if (notes->have_cycles && notes->options->show_minmax_cycle) ++ if (notes->branch && notes->options->show_minmax_cycle) + return ANNOTATION__IPC_WIDTH + ANNOTATION__MINMAX_CYCLES_WIDTH; + +- return notes->have_cycles ? ANNOTATION__IPC_WIDTH + ANNOTATION__CYCLES_WIDTH : 0; ++ return notes->branch ? 
ANNOTATION__IPC_WIDTH + ANNOTATION__CYCLES_WIDTH : 0; + } + + static inline int annotation__pcnt_width(struct annotation *notes) +@@ -409,13 +412,11 @@ int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel); + + #ifdef HAVE_SLANG_SUPPORT + int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel, +- struct hist_browser_timer *hbt, +- struct annotation_options *opts); ++ struct hist_browser_timer *hbt); + #else + static inline int symbol__tui_annotate(struct map_symbol *ms __maybe_unused, + struct evsel *evsel __maybe_unused, +- struct hist_browser_timer *hbt __maybe_unused, +- struct annotation_options *opts __maybe_unused) ++ struct hist_browser_timer *hbt __maybe_unused) + { + return 0; + } +diff --git a/tools/perf/util/block-info.c b/tools/perf/util/block-info.c +index 591fc1edd385ca..dec910989701eb 100644 +--- a/tools/perf/util/block-info.c ++++ b/tools/perf/util/block-info.c +@@ -129,9 +129,9 @@ int block_info__process_sym(struct hist_entry *he, struct block_hist *bh, + al.sym = he->ms.sym; + + notes = symbol__annotation(he->ms.sym); +- if (!notes || !notes->src || !notes->src->cycles_hist) ++ if (!notes || !notes->branch || !notes->branch->cycles_hist) + return 0; +- ch = notes->src->cycles_hist; ++ ch = notes->branch->cycles_hist; + for (unsigned int i = 0; i < symbol__size(he->ms.sym); i++) { + if (ch[i].num_aggr) { + struct block_info *bi; +@@ -464,8 +464,7 @@ void block_info__free_report(struct block_report *reps, int nr_reps) + } + + int report__browse_block_hists(struct block_hist *bh, float min_percent, +- struct evsel *evsel, struct perf_env *env, +- struct annotation_options *annotation_opts) ++ struct evsel *evsel, struct perf_env *env) + { + int ret; + +@@ -477,8 +476,7 @@ int report__browse_block_hists(struct block_hist *bh, float min_percent, + return 0; + case 1: + symbol_conf.report_individual_block = true; +- ret = block_hists_tui_browse(bh, evsel, min_percent, +- env, annotation_opts); ++ ret = block_hists_tui_browse(bh, evsel, min_percent, env); + return ret; + default: + return -1; +diff --git a/tools/perf/util/block-info.h b/tools/perf/util/block-info.h +index 42e9dcc4cf0ab3..96f53e89795e24 100644 +--- a/tools/perf/util/block-info.h ++++ b/tools/perf/util/block-info.h +@@ -78,8 +78,7 @@ struct block_report *block_info__create_report(struct evlist *evlist, + void block_info__free_report(struct block_report *reps, int nr_reps); + + int report__browse_block_hists(struct block_hist *bh, float min_percent, +- struct evsel *evsel, struct perf_env *env, +- struct annotation_options *annotation_opts); ++ struct evsel *evsel, struct perf_env *env); + + float block_info__total_cycles_percent(struct hist_entry *he); + +diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h +index afc9f1c7f4dc24..5d0db96609dff5 100644 +--- a/tools/perf/util/hist.h ++++ b/tools/perf/util/hist.h +@@ -457,7 +457,6 @@ struct hist_browser_timer { + int refresh; + }; + +-struct annotation_options; + struct res_sample; + + enum rstype { +@@ -473,16 +472,13 @@ struct block_hist; + void attr_to_script(char *buf, struct perf_event_attr *attr); + + int map_symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel, +- struct hist_browser_timer *hbt, +- struct annotation_options *annotation_opts); ++ struct hist_browser_timer *hbt); + + int hist_entry__tui_annotate(struct hist_entry *he, struct evsel *evsel, +- struct hist_browser_timer *hbt, +- struct annotation_options *annotation_opts); ++ struct hist_browser_timer *hbt); + + int evlist__tui_browse_hists(struct 
evlist *evlist, const char *help, struct hist_browser_timer *hbt, +- float min_pcnt, struct perf_env *env, bool warn_lost_event, +- struct annotation_options *annotation_options); ++ float min_pcnt, struct perf_env *env, bool warn_lost_event); + + int script_browse(const char *script_opt, struct evsel *evsel); + +@@ -492,8 +488,7 @@ int res_sample_browse(struct res_sample *res_samples, int num_res, + void res_sample_init(void); + + int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel, +- float min_percent, struct perf_env *env, +- struct annotation_options *annotation_opts); ++ float min_percent, struct perf_env *env); + #else + static inline + int evlist__tui_browse_hists(struct evlist *evlist __maybe_unused, +@@ -501,23 +496,20 @@ int evlist__tui_browse_hists(struct evlist *evlist __maybe_unused, + struct hist_browser_timer *hbt __maybe_unused, + float min_pcnt __maybe_unused, + struct perf_env *env __maybe_unused, +- bool warn_lost_event __maybe_unused, +- struct annotation_options *annotation_options __maybe_unused) ++ bool warn_lost_event __maybe_unused) + { + return 0; + } + static inline int map_symbol__tui_annotate(struct map_symbol *ms __maybe_unused, + struct evsel *evsel __maybe_unused, +- struct hist_browser_timer *hbt __maybe_unused, +- struct annotation_options *annotation_options __maybe_unused) ++ struct hist_browser_timer *hbt __maybe_unused) + { + return 0; + } + + static inline int hist_entry__tui_annotate(struct hist_entry *he __maybe_unused, + struct evsel *evsel __maybe_unused, +- struct hist_browser_timer *hbt __maybe_unused, +- struct annotation_options *annotation_opts __maybe_unused) ++ struct hist_browser_timer *hbt __maybe_unused) + { + return 0; + } +@@ -541,8 +533,7 @@ static inline void res_sample_init(void) {} + static inline int block_hists_tui_browse(struct block_hist *bh __maybe_unused, + struct evsel *evsel __maybe_unused, + float min_percent __maybe_unused, +- struct perf_env *env __maybe_unused, +- struct annotation_options *annotation_opts __maybe_unused) ++ struct perf_env *env __maybe_unused) + { + return 0; + } +diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c +index c6afba7ab1a517..277b2cbd518611 100644 +--- a/tools/perf/util/session.c ++++ b/tools/perf/util/session.c +@@ -1500,6 +1500,9 @@ static int deliver_sample_group(struct evlist *evlist, + int ret = -EINVAL; + struct sample_read_value *v = sample->read.group.values; + ++ if (tool->dont_split_sample_group) ++ return deliver_sample_value(evlist, tool, event, sample, v, machine); ++ + sample_read_group__for_each(v, sample->read.group.nr, read_format) { + ret = deliver_sample_value(evlist, tool, event, sample, v, + machine); +diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c +index 6ab8147a3f8708..b80349ca219972 100644 +--- a/tools/perf/util/sort.c ++++ b/tools/perf/util/sort.c +@@ -583,21 +583,21 @@ static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf, + { + + struct symbol *sym = he->ms.sym; +- struct annotation *notes; ++ struct annotated_branch *branch; + double ipc = 0.0, coverage = 0.0; + char tmp[64]; + + if (!sym) + return repsep_snprintf(bf, size, "%-*s", width, "-"); + +- notes = symbol__annotation(sym); ++ branch = symbol__annotation(sym)->branch; + +- if (notes->hit_cycles) +- ipc = notes->hit_insn / ((double)notes->hit_cycles); ++ if (branch && branch->hit_cycles) ++ ipc = branch->hit_insn / ((double)branch->hit_cycles); + +- if (notes->total_insn) { +- coverage = notes->cover_insn * 100.0 / +- 
((double)notes->total_insn); ++ if (branch && branch->total_insn) { ++ coverage = branch->cover_insn * 100.0 / ++ ((double)branch->total_insn); + } + + snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage); +diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c +index 0abe35388ab156..f98ade7f9fba4f 100644 +--- a/tools/perf/util/stat-display.c ++++ b/tools/perf/util/stat-display.c +@@ -1207,7 +1207,8 @@ static void print_metric_headers(struct perf_stat_config *config, + + /* Print metrics headers only */ + evlist__for_each_entry(evlist, counter) { +- if (config->aggr_mode != AGGR_NONE && counter->metric_leader != counter) ++ if (!config->iostat_run && ++ config->aggr_mode != AGGR_NONE && counter->metric_leader != counter) + continue; + + os.evsel = counter; +diff --git a/tools/perf/util/time-utils.c b/tools/perf/util/time-utils.c +index 30244392168163..1b91ccd4d52348 100644 +--- a/tools/perf/util/time-utils.c ++++ b/tools/perf/util/time-utils.c +@@ -20,7 +20,7 @@ int parse_nsec_time(const char *str, u64 *ptime) + u64 time_sec, time_nsec; + char *end; + +- time_sec = strtoul(str, &end, 10); ++ time_sec = strtoull(str, &end, 10); + if (*end != '.' && *end != '\0') + return -1; + +@@ -38,7 +38,7 @@ int parse_nsec_time(const char *str, u64 *ptime) + for (i = strlen(nsec_buf); i < 9; i++) + nsec_buf[i] = '0'; + +- time_nsec = strtoul(nsec_buf, &end, 10); ++ time_nsec = strtoull(nsec_buf, &end, 10); + if (*end != '\0') + return -1; + } else +diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h +index c957fb849ac633..62bbc9cec151bb 100644 +--- a/tools/perf/util/tool.h ++++ b/tools/perf/util/tool.h +@@ -85,6 +85,7 @@ struct perf_tool { + bool namespace_events; + bool cgroup_events; + bool no_warn; ++ bool dont_split_sample_group; + enum show_feature_header show_feat_hdr; + }; + +diff --git a/tools/power/cpupower/lib/powercap.c b/tools/power/cpupower/lib/powercap.c +index a7a59c6bacda81..94a0c69e55ef5e 100644 +--- a/tools/power/cpupower/lib/powercap.c ++++ b/tools/power/cpupower/lib/powercap.c +@@ -77,6 +77,14 @@ int powercap_get_enabled(int *mode) + return sysfs_get_enabled(path, mode); + } + ++/* ++ * TODO: implement function. Returns dummy 0 for now. ++ */ ++int powercap_set_enabled(int mode) ++{ ++ return 0; ++} ++ + /* + * Hardcoded, because rapl is the only powercap implementation + - * this needs to get more generic if more powercap implementations +diff --git a/tools/testing/selftests/arm64/signal/Makefile b/tools/testing/selftests/arm64/signal/Makefile +index 8f5febaf1a9a25..edb3613513b8a8 100644 +--- a/tools/testing/selftests/arm64/signal/Makefile ++++ b/tools/testing/selftests/arm64/signal/Makefile +@@ -23,7 +23,7 @@ $(TEST_GEN_PROGS): $(PROGS) + # Common test-unit targets to build common-layout test-cases executables + # Needs secondary expansion to properly include the testcase c-file in pre-reqs + COMMON_SOURCES := test_signals.c test_signals_utils.c testcases/testcases.c \ +- signals.S ++ signals.S sve_helpers.c + COMMON_HEADERS := test_signals.h test_signals_utils.h testcases/testcases.h + + .SECONDEXPANSION: +diff --git a/tools/testing/selftests/arm64/signal/sve_helpers.c b/tools/testing/selftests/arm64/signal/sve_helpers.c +new file mode 100644 +index 00000000000000..0acc121af3062a +--- /dev/null ++++ b/tools/testing/selftests/arm64/signal/sve_helpers.c +@@ -0,0 +1,56 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) 2024 ARM Limited ++ * ++ * Common helper functions for SVE and SME functionality. 
++ */
++
++#include
++#include
++#include
++#include
++
++unsigned int vls[SVE_VQ_MAX];
++unsigned int nvls;
++
++int sve_fill_vls(bool use_sme, int min_vls)
++{
++ int vq, vl;
++ int pr_set_vl = use_sme ? PR_SME_SET_VL : PR_SVE_SET_VL;
++ int len_mask = use_sme ? PR_SME_VL_LEN_MASK : PR_SVE_VL_LEN_MASK;
++
++ /*
++ * Enumerate up to SVE_VQ_MAX vector lengths
++ */
++ for (vq = SVE_VQ_MAX; vq > 0; --vq) {
++ vl = prctl(pr_set_vl, vq * 16);
++ if (vl == -1)
++ return KSFT_FAIL;
++
++ vl &= len_mask;
++
++ /*
++ * Unlike SVE, SME does not require the minimum vector length
++ * to be implemented, or the VLs to be consecutive, so any call
++ * to the prctl might return the single implemented VL, which
++ * might be larger than 16. So to avoid this loop never
++ * terminating, bail out here when we find a higher VL than
++ * we asked for.
++ * See the ARM ARM, DDI 0487K.a, B1.4.2: I_QQRNR and I_NWYBP.
++ */
++ if (vq < sve_vq_from_vl(vl))
++ break;
++
++ /* Skip missing VLs */
++ vq = sve_vq_from_vl(vl);
++
++ vls[nvls++] = vl;
++ }
++
++ if (nvls < min_vls) {
++ fprintf(stderr, "Only %d VL supported\n", nvls);
++ return KSFT_SKIP;
++ }
++
++ return KSFT_PASS;
++}
+diff --git a/tools/testing/selftests/arm64/signal/sve_helpers.h b/tools/testing/selftests/arm64/signal/sve_helpers.h
+new file mode 100644
+index 00000000000000..50948ce471cc62
+--- /dev/null
++++ b/tools/testing/selftests/arm64/signal/sve_helpers.h
+@@ -0,0 +1,21 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Copyright (C) 2024 ARM Limited
++ *
++ * Common helper functions for SVE and SME functionality.
++ */
++
++#ifndef __SVE_HELPERS_H__
++#define __SVE_HELPERS_H__
++
++#include
++
++#define VLS_USE_SVE false
++#define VLS_USE_SME true
++
++extern unsigned int vls[];
++extern unsigned int nvls;
++
++int sve_fill_vls(bool use_sme, int min_vls);
++
++#endif
+diff --git a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c
+index ebd5815b54bbaa..dfd6a2badf9fb3 100644
+--- a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c
++++ b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c
+@@ -6,44 +6,28 @@
+ * handler, this is not supported and is expected to segfault.
+ */
+
++#include <kselftest.h>
+ #include <signal.h>
+ #include <ucontext.h>
+ #include <sys/prctl.h>
+
+ #include "test_signals_utils.h"
++#include "sve_helpers.h"
+ #include "testcases.h"
+
+ struct fake_sigframe sf;
+-static unsigned int vls[SVE_VQ_MAX];
+-unsigned int nvls = 0;
+
+ static bool sme_get_vls(struct tdescr *td)
+ {
+-	int vq, vl;
++	int res = sve_fill_vls(VLS_USE_SME, 2);
+
+-	/*
+-	 * Enumerate up to SVE_VQ_MAX vector lengths
+-	 */
+-	for (vq = SVE_VQ_MAX; vq > 0; --vq) {
+-		vl = prctl(PR_SVE_SET_VL, vq * 16);
+-		if (vl == -1)
+-			return false;
++	if (!res)
++		return true;
+
+-		vl &= PR_SME_VL_LEN_MASK;
+
++	if (res == KSFT_SKIP)
++		td->result = KSFT_SKIP;
+
+-		/* Skip missing VLs */
+-		vq = sve_vq_from_vl(vl);
+-
+-		vls[nvls++] = vl;
+-	}
+-
+-	/* We need at least two VLs */
+-	if (nvls < 2) {
+-		fprintf(stderr, "Only %d VL supported\n", nvls);
+-		return false;
+-	}
+-
+-	return true;
++	return false;
+ }
+
+ static int fake_sigreturn_ssve_change_vl(struct tdescr *td,
+@@ -51,30 +35,30 @@ static int fake_sigreturn_ssve_change_vl(struct tdescr *td,
+ {
+ 	size_t resv_sz, offset;
+ 	struct _aarch64_ctx *head = GET_SF_RESV_HEAD(sf);
+-	struct sve_context *sve;
++	struct za_context *za;
+
+ 	/* Get a signal context with a SME ZA frame in it */
+ 	if (!get_current_context(td, &sf.uc, sizeof(sf.uc)))
+ 		return 1;
+
+ 	resv_sz = GET_SF_RESV_SIZE(sf);
+-	head = get_header(head, SVE_MAGIC, resv_sz, &offset);
++	head = get_header(head, ZA_MAGIC, resv_sz, &offset);
+ 	if (!head) {
+-		fprintf(stderr, "No SVE context\n");
++		fprintf(stderr, "No ZA context\n");
+ 		return 1;
+ 	}
+
+-	if (head->size != sizeof(struct sve_context)) {
++	if (head->size != sizeof(struct za_context)) {
+ 		fprintf(stderr, "Register data present, aborting\n");
+ 		return 1;
+ 	}
+
+-	sve = (struct sve_context *)head;
++	za = (struct za_context *)head;
+
+ 	/* No changes are supported; init left us at minimum VL so go to max */
+ 	fprintf(stderr, "Attempting to change VL from %d to %d\n",
+-		sve->vl, vls[0]);
+-	sve->vl = vls[0];
++		za->vl, vls[0]);
++	za->vl = vls[0];
+
+ 	fake_sigreturn(&sf, sizeof(sf), 0);
+
+diff --git a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c
+index e2a452190511ff..e1ccf8f85a70c8 100644
+--- a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c
++++ b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c
+@@ -12,40 +12,22 @@
+ #include <sys/prctl.h>
+
+ #include "test_signals_utils.h"
++#include "sve_helpers.h"
+ #include "testcases.h"
+
+ struct fake_sigframe sf;
+-static unsigned int vls[SVE_VQ_MAX];
+-unsigned int nvls = 0;
+
+ static bool sve_get_vls(struct tdescr *td)
+ {
+-	int vq, vl;
++	int res = sve_fill_vls(VLS_USE_SVE, 2);
+
+-	/*
+-	 * Enumerate up to SVE_VQ_MAX vector lengths
+-	 */
+-	for (vq = SVE_VQ_MAX; vq > 0; --vq) {
+-		vl = prctl(PR_SVE_SET_VL, vq * 16);
+-		if (vl == -1)
+-			return false;
++	if (!res)
++		return true;
+
+-		vl &= PR_SVE_VL_LEN_MASK;
+-
+-		/* Skip missing VLs */
+-		vq = sve_vq_from_vl(vl);
+-
+-		vls[nvls++] = vl;
+-	}
+-
+-	/* We need at least two VLs */
+-	if (nvls < 2) {
+-		fprintf(stderr, "Only %d VL supported\n", nvls);
++	if (res == KSFT_SKIP)
+ 		td->result = KSFT_SKIP;
+-		return false;
+-	}
+
+-	return true;
++	return false;
+ }
+
+ static int fake_sigreturn_sve_change_vl(struct tdescr *td,
+diff --git a/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c b/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c
+index 3d37daafcff513..6dbe48cf8b09ed 100644
+--- a/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c
++++ b/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c
+@@ -6,51 +6,31 @@
+  * set up as expected.
+  */
+
++#include <kselftest.h>
+ #include <signal.h>
+ #include <ucontext.h>
+ #include <sys/prctl.h>
+
+ #include "test_signals_utils.h"
++#include "sve_helpers.h"
+ #include "testcases.h"
+
+ static union {
+ 	ucontext_t uc;
+ 	char buf[1024 * 64];
+ } context;
+-static unsigned int vls[SVE_VQ_MAX];
+-unsigned int nvls = 0;
+
+ static bool sme_get_vls(struct tdescr *td)
+ {
+-	int vq, vl;
++	int res = sve_fill_vls(VLS_USE_SME, 1);
+
+-	/*
+-	 * Enumerate up to SVE_VQ_MAX vector lengths
+-	 */
+-	for (vq = SVE_VQ_MAX; vq > 0; --vq) {
+-		vl = prctl(PR_SME_SET_VL, vq * 16);
+-		if (vl == -1)
+-			return false;
+-
+-		vl &= PR_SME_VL_LEN_MASK;
+-
+-		/* Did we find the lowest supported VL? */
+-		if (vq < sve_vq_from_vl(vl))
+-			break;
++	if (!res)
++		return true;
+
+-		/* Skip missing VLs */
+-		vq = sve_vq_from_vl(vl);
+-
+-		vls[nvls++] = vl;
+-	}
+-
+-	/* We need at least one VL */
+-	if (nvls < 1) {
+-		fprintf(stderr, "Only %d VL supported\n", nvls);
+-		return false;
+-	}
++	if (res == KSFT_SKIP)
++		td->result = KSFT_SKIP;
+
+-	return true;
++	return false;
+ }
+
+ static void setup_ssve_regs(void)
+diff --git a/tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c b/tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c
+index 9dc5f128bbc0d5..5557e116e97363 100644
+--- a/tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c
++++ b/tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c
+@@ -6,51 +6,31 @@
+  * signal frames is set up as expected when enabled simultaneously.
+  */
+
++#include <kselftest.h>
+ #include <signal.h>
+ #include <ucontext.h>
+ #include <sys/prctl.h>
+
+ #include "test_signals_utils.h"
++#include "sve_helpers.h"
+ #include "testcases.h"
+
+ static union {
+ 	ucontext_t uc;
+ 	char buf[1024 * 128];
+ } context;
+-static unsigned int vls[SVE_VQ_MAX];
+-unsigned int nvls = 0;
+
+ static bool sme_get_vls(struct tdescr *td)
+ {
+-	int vq, vl;
++	int res = sve_fill_vls(VLS_USE_SME, 1);
+
+-	/*
+-	 * Enumerate up to SVE_VQ_MAX vector lengths
+-	 */
+-	for (vq = SVE_VQ_MAX; vq > 0; --vq) {
+-		vl = prctl(PR_SME_SET_VL, vq * 16);
+-		if (vl == -1)
+-			return false;
+-
+-		vl &= PR_SME_VL_LEN_MASK;
+-
+-		/* Did we find the lowest supported VL? */
+-		if (vq < sve_vq_from_vl(vl))
+-			break;
++	if (!res)
++		return true;
+
+-		/* Skip missing VLs */
+-		vq = sve_vq_from_vl(vl);
+-
+-		vls[nvls++] = vl;
+-	}
+-
+-	/* We need at least one VL */
+-	if (nvls < 1) {
+-		fprintf(stderr, "Only %d VL supported\n", nvls);
+-		return false;
+-	}
++	if (res == KSFT_SKIP)
++		td->result = KSFT_SKIP;
+
+-	return true;
++	return false;
+ }
+
+ static void setup_regs(void)
+diff --git a/tools/testing/selftests/arm64/signal/testcases/sve_regs.c b/tools/testing/selftests/arm64/signal/testcases/sve_regs.c
+index 8b16eabbb7697e..8143eb1c58c187 100644
+--- a/tools/testing/selftests/arm64/signal/testcases/sve_regs.c
++++ b/tools/testing/selftests/arm64/signal/testcases/sve_regs.c
+@@ -6,47 +6,31 @@
+  * expected.
+  */
+
++#include <kselftest.h>
+ #include <signal.h>
+ #include <ucontext.h>
+ #include <sys/prctl.h>
+
+ #include "test_signals_utils.h"
++#include "sve_helpers.h"
+ #include "testcases.h"
+
+ static union {
+ 	ucontext_t uc;
+ 	char buf[1024 * 64];
+ } context;
+-static unsigned int vls[SVE_VQ_MAX];
+-unsigned int nvls = 0;
+
+ static bool sve_get_vls(struct tdescr *td)
+ {
+-	int vq, vl;
++	int res = sve_fill_vls(VLS_USE_SVE, 1);
+
+-	/*
+-	 * Enumerate up to SVE_VQ_MAX vector lengths
+-	 */
+-	for (vq = SVE_VQ_MAX; vq > 0; --vq) {
+-		vl = prctl(PR_SVE_SET_VL, vq * 16);
+-		if (vl == -1)
+-			return false;
+-
+-		vl &= PR_SVE_VL_LEN_MASK;
+-
+-		/* Skip missing VLs */
+-		vq = sve_vq_from_vl(vl);
++	if (!res)
++		return true;
+
+-		vls[nvls++] = vl;
+-	}
+-
+-	/* We need at least one VL */
+-	if (nvls < 1) {
+-		fprintf(stderr, "Only %d VL supported\n", nvls);
+-		return false;
+-	}
++	if (res == KSFT_SKIP)
++		td->result = KSFT_SKIP;
+
+-	return true;
++	return false;
+ }
+
+ static void setup_sve_regs(void)
+diff --git a/tools/testing/selftests/arm64/signal/testcases/za_no_regs.c b/tools/testing/selftests/arm64/signal/testcases/za_no_regs.c
+index 4d6f94b6178f36..ce26e9c2fa5e34 100644
+--- a/tools/testing/selftests/arm64/signal/testcases/za_no_regs.c
++++ b/tools/testing/selftests/arm64/signal/testcases/za_no_regs.c
+@@ -6,47 +6,31 @@
+  * expected.
+  */
+
++#include <kselftest.h>
+ #include <signal.h>
+ #include <ucontext.h>
+ #include <sys/prctl.h>
+
+ #include "test_signals_utils.h"
++#include "sve_helpers.h"
+ #include "testcases.h"
+
+ static union {
+ 	ucontext_t uc;
+ 	char buf[1024 * 128];
+ } context;
+-static unsigned int vls[SVE_VQ_MAX];
+-unsigned int nvls = 0;
+
+ static bool sme_get_vls(struct tdescr *td)
+ {
+-	int vq, vl;
++	int res = sve_fill_vls(VLS_USE_SME, 1);
+
+-	/*
+-	 * Enumerate up to SME_VQ_MAX vector lengths
+-	 */
+-	for (vq = SVE_VQ_MAX; vq > 0; --vq) {
+-		vl = prctl(PR_SME_SET_VL, vq * 16);
+-		if (vl == -1)
+-			return false;
+-
+-		vl &= PR_SME_VL_LEN_MASK;
+-
+-		/* Skip missing VLs */
+-		vq = sve_vq_from_vl(vl);
++	if (!res)
++		return true;
+
+-		vls[nvls++] = vl;
+-	}
+-
+-	/* We need at least one VL */
+-	if (nvls < 1) {
+-		fprintf(stderr, "Only %d VL supported\n", nvls);
+-		return false;
+-	}
++	if (res == KSFT_SKIP)
++		td->result = KSFT_SKIP;
+
+-	return true;
++	return false;
+ }
+
+ static int do_one_sme_vl(struct tdescr *td, siginfo_t *si, ucontext_t *uc,
+diff --git a/tools/testing/selftests/arm64/signal/testcases/za_regs.c b/tools/testing/selftests/arm64/signal/testcases/za_regs.c
+index 174ad665669647..b9e13f27f1f9aa 100644
+--- a/tools/testing/selftests/arm64/signal/testcases/za_regs.c
++++ b/tools/testing/selftests/arm64/signal/testcases/za_regs.c
+@@ -6,51 +6,31 @@
+  * expected.
+  */
+
++#include <kselftest.h>
+ #include <signal.h>
+ #include <ucontext.h>
+ #include <sys/prctl.h>
+
+ #include "test_signals_utils.h"
++#include "sve_helpers.h"
+ #include "testcases.h"
+
+ static union {
+ 	ucontext_t uc;
+ 	char buf[1024 * 128];
+ } context;
+-static unsigned int vls[SVE_VQ_MAX];
+-unsigned int nvls = 0;
+
+ static bool sme_get_vls(struct tdescr *td)
+ {
+-	int vq, vl;
++	int res = sve_fill_vls(VLS_USE_SME, 1);
+
+-	/*
+-	 * Enumerate up to SME_VQ_MAX vector lengths
+-	 */
+-	for (vq = SVE_VQ_MAX; vq > 0; --vq) {
+-		vl = prctl(PR_SME_SET_VL, vq * 16);
+-		if (vl == -1)
+-			return false;
+-
+-		vl &= PR_SME_VL_LEN_MASK;
+-
+-		/* Did we find the lowest supported VL? */
+-		if (vq < sve_vq_from_vl(vl))
+-			break;
++	if (!res)
++		return true;
+
+-		/* Skip missing VLs */
+-		vq = sve_vq_from_vl(vl);
+-
+-		vls[nvls++] = vl;
+-	}
+-
+-	/* We need at least one VL */
+-	if (nvls < 1) {
+-		fprintf(stderr, "Only %d VL supported\n", nvls);
+-		return false;
+-	}
++	if (res == KSFT_SKIP)
++		td->result = KSFT_SKIP;
+
+-	return true;
++	return false;
+ }
+
+ static void setup_za_regs(void)
+diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
+index caede9b574cb16..ab364e95a9b23e 100644
+--- a/tools/testing/selftests/bpf/Makefile
++++ b/tools/testing/selftests/bpf/Makefile
+@@ -56,6 +56,15 @@ TEST_INST_SUBDIRS := no_alu32
+ ifneq ($(BPF_GCC),)
+ TEST_GEN_PROGS += test_progs-bpf_gcc
+ TEST_INST_SUBDIRS += bpf_gcc
++
++# The following tests contain C code that, although technically legal,
++# triggers GCC warnings that cannot be disabled: declaration of
++# anonymous struct types in function parameter lists.
++progs/btf_dump_test_case_bitfields.c-bpf_gcc-CFLAGS := -Wno-error
++progs/btf_dump_test_case_namespacing.c-bpf_gcc-CFLAGS := -Wno-error
++progs/btf_dump_test_case_packing.c-bpf_gcc-CFLAGS := -Wno-error
++progs/btf_dump_test_case_padding.c-bpf_gcc-CFLAGS := -Wno-error
++progs/btf_dump_test_case_syntax.c-bpf_gcc-CFLAGS := -Wno-error
+ endif
+
+ ifneq ($(CLANG_CPUV4),)
+@@ -386,24 +395,25 @@ $(OUTPUT)/cgroup_getset_retval_hooks.o: cgroup_getset_retval_hooks.h
+ # $1 - input .c file
+ # $2 - output .o file
+ # $3 - CFLAGS
++# $4 - binary name
+ define CLANG_BPF_BUILD_RULE
+-	$(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2)
++	$(call msg,CLNG-BPF,$4,$2)
+ 	$(Q)$(CLANG) $3 -O2 --target=bpf -c $1 -mcpu=v3 -o $2
+ endef
+ # Similar to CLANG_BPF_BUILD_RULE, but with disabled alu32
+ define CLANG_NOALU32_BPF_BUILD_RULE
+-	$(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2)
++	$(call msg,CLNG-BPF,$4,$2)
+ 	$(Q)$(CLANG) $3 -O2 --target=bpf -c $1 -mcpu=v2 -o $2
+ endef
+ # Similar to CLANG_BPF_BUILD_RULE, but with cpu-v4
+ define CLANG_CPUV4_BPF_BUILD_RULE
+-	$(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2)
++	$(call msg,CLNG-BPF,$4,$2)
+ 	$(Q)$(CLANG) $3 -O2 --target=bpf -c $1 -mcpu=v4 -o $2
+ endef
+ # Build BPF object using GCC
+ define GCC_BPF_BUILD_RULE
+-	$(call msg,GCC-BPF,$(TRUNNER_BINARY),$2)
+-	$(Q)$(BPF_GCC) $3 -O2 -c $1 -o $2
++	$(call msg,GCC-BPF,$4,$2)
++	$(Q)$(BPF_GCC) $3 -DBPF_NO_PRESERVE_ACCESS_INDEX -Wno-attributes -O2 -c $1 -o $2
+ endef
+
+ SKEL_BLACKLIST := btf__% test_pinning_invalid.c test_sk_assign.c
+@@ -442,7 +452,7 @@ LINKED_BPF_SRCS := $(patsubst %.bpf.o,%.c,$(foreach skel,$(LINKED_SKELS),$($(ske
+ # $eval()) and pass control to DEFINE_TEST_RUNNER_RULES.
+ # Parameters:
+ # $1 - test runner base binary name (e.g., test_progs)
+-# $2 - test runner extra "flavor" (e.g., no_alu32, cpuv4, gcc-bpf, etc)
++# $2 - test runner extra "flavor" (e.g., no_alu32, cpuv4, bpf_gcc, etc)
+ define DEFINE_TEST_RUNNER
+
+ TRUNNER_OUTPUT := $(OUTPUT)$(if $2,/)$2
+@@ -470,7 +480,7 @@ endef
+ # Using TRUNNER_XXX variables, provided by callers of DEFINE_TEST_RUNNER and
+ # set up by DEFINE_TEST_RUNNER itself, create test runner build rules with:
+ # $1 - test runner base binary name (e.g., test_progs)
+-# $2 - test runner extra "flavor" (e.g., no_alu32, cpuv4, gcc-bpf, etc)
++# $2 - test runner extra "flavor" (e.g., no_alu32, cpuv4, bpf_gcc, etc)
+ define DEFINE_TEST_RUNNER_RULES
+
+ ifeq ($($(TRUNNER_OUTPUT)-dir),)
+@@ -492,7 +502,9 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.bpf.o: \
+ 		     $(wildcard $(BPFDIR)/*.bpf.h) \
+ 		     | $(TRUNNER_OUTPUT) $$(BPFOBJ)
+ 	$$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@, \
+-					  $(TRUNNER_BPF_CFLAGS))
++					  $(TRUNNER_BPF_CFLAGS) \
++					  $$($$<-CFLAGS) \
++					  $$($$<-$2-CFLAGS),$(TRUNNER_BINARY))
+
+ $(TRUNNER_BPF_SKELS): %.skel.h: %.bpf.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
+ 	$$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
+@@ -702,6 +714,8 @@ $(OUTPUT)/veristat: $(OUTPUT)/veristat.o
+ 	$(call msg,BINARY,,$@)
+ 	$(Q)$(CC) $(CFLAGS) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@
+
++# Linking uprobe_multi can fail due to relocation overflows on mips.
++$(OUTPUT)/uprobe_multi: CFLAGS += $(if $(filter mips, $(ARCH)),-mxgot)
+ $(OUTPUT)/uprobe_multi: uprobe_multi.c
+ 	$(call msg,BINARY,,$@)
+ 	$(Q)$(CC) $(CFLAGS) $(LDFLAGS) $^ $(LDLIBS) -o $@
+diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c
+index 73ce11b0547da7..b705cbabe1e2f2 100644
+--- a/tools/testing/selftests/bpf/bench.c
++++ b/tools/testing/selftests/bpf/bench.c
+@@ -10,6 +10,7 @@
+ #include
+ #include
+ #include "bench.h"
++#include "bpf_util.h"
+ #include "testing_helpers.h"
+
+ struct env env = {
+diff --git a/tools/testing/selftests/bpf/bench.h b/tools/testing/selftests/bpf/bench.h
+index 68180d8f8558ec..005c401b3e2275 100644
+--- a/tools/testing/selftests/bpf/bench.h
++++ b/tools/testing/selftests/bpf/bench.h
+@@ -10,6 +10,7 @@
+ #include
+ #include
+ #include
++#include
+
+ struct cpu_set {
+ 	bool *cpus;
+diff --git a/tools/testing/selftests/bpf/map_tests/sk_storage_map.c b/tools/testing/selftests/bpf/map_tests/sk_storage_map.c
+index 18405c3b7cee9a..af10c309359a77 100644
+--- a/tools/testing/selftests/bpf/map_tests/sk_storage_map.c
++++ b/tools/testing/selftests/bpf/map_tests/sk_storage_map.c
+@@ -412,7 +412,7 @@ static void test_sk_storage_map_stress_free(void)
+ 		rlim_new.rlim_max = rlim_new.rlim_cur + 128;
+ 		err = setrlimit(RLIMIT_NOFILE, &rlim_new);
+ 		CHECK(err, "setrlimit(RLIMIT_NOFILE)", "rlim_new:%lu errno:%d",
+-		      rlim_new.rlim_cur, errno);
++		      (unsigned long) rlim_new.rlim_cur, errno);
+ 	}
+
+ 	err = do_sk_storage_map_stress_free();
+diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c
+index 0877b60ec81f69..d2acc88752126c 100644
+--- a/tools/testing/selftests/bpf/network_helpers.c
++++ b/tools/testing/selftests/bpf/network_helpers.c
+@@ -465,3 +465,27 @@ int get_socket_local_port(int sock_fd)
+
+ 	return -1;
+ }
++
++int get_hw_ring_size(char *ifname, struct ethtool_ringparam *ring_param)
++{
++	struct ifreq ifr = {0};
++	int sockfd, err;
++
++	sockfd = socket(AF_INET, SOCK_DGRAM, 0);
++	if (sockfd < 0)
++		return -errno;
++
++	memcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
++
++	ring_param->cmd = ETHTOOL_GRINGPARAM;
++	ifr.ifr_data = (char *)ring_param;
++
++	if (ioctl(sockfd, SIOCETHTOOL, &ifr) < 0) {
++		err = errno;
++		close(sockfd);
++		return -err;
++	}
++
++	close(sockfd);
++	return 0;
++}
+diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h
+index 5eccc67d1a9989..11cbe194769b16 100644
+--- a/tools/testing/selftests/bpf/network_helpers.h
++++ b/tools/testing/selftests/bpf/network_helpers.h
+@@ -9,8 +9,11 @@ typedef __u16 __sum16;
+ #include
+ #include
+ #include
++#include
++#include
+ #include
+ #include
++#include
+
+ #define MAGIC_VAL 0x1234
+ #define NUM_ITER 100000
+@@ -60,6 +63,7 @@ int make_sockaddr(int family, const char *addr_str, __u16 port,
+ 		  struct sockaddr_storage *addr, socklen_t *len);
+ char *ping_command(int family);
+ int get_socket_local_port(int sock_fd);
++int get_hw_ring_size(char *ifname, struct ethtool_ringparam *ring_param);
+
+ struct nstoken;
+ /**
+diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c
+index b52ff8ce34db82..16bed9dd8e6a30 100644
+--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c
++++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c
+@@ -95,7 +95,7 @@ static unsigned short get_local_port(int fd)
+ 	struct sockaddr_in6 addr;
+ 	socklen_t addrlen = sizeof(addr);
+
+-	if (!getsockname(fd, &addr, &addrlen))
++	if (!getsockname(fd, (struct sockaddr *)&addr, &addrlen))
+ 		return ntohs(addr.sin6_port);
+
+ 	return 0;
+diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+index 47f42e6801056b..26019313e1fc20 100644
+--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
++++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+@@ -1,4 +1,5 @@
+ // SPDX-License-Identifier: GPL-2.0
++#define _GNU_SOURCE
+ #include <test_progs.h>
+ #include "progs/core_reloc_types.h"
+ #include "bpf_testmod/bpf_testmod.h"
+diff --git a/tools/testing/selftests/bpf/prog_tests/decap_sanity.c b/tools/testing/selftests/bpf/prog_tests/decap_sanity.c
+index 5c0ebe6ba86673..95ea5a6a5f18dc 100644
+--- a/tools/testing/selftests/bpf/prog_tests/decap_sanity.c
++++ b/tools/testing/selftests/bpf/prog_tests/decap_sanity.c
+@@ -4,7 +4,6 @@
+ #include
+ #include
+ #include
+-#include
+
+ #include "test_progs.h"
+ #include "network_helpers.h"
+diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
+index c4773173a4e437..3171047414a7dc 100644
+--- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
++++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
+@@ -1,8 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
++#define _GNU_SOURCE
+ #include
+ #include
+-#include
+-#include
+ #include
+ #include
+
+diff --git a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
+index c07991544a789e..34f8822fd2219c 100644
+--- a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
++++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
+@@ -1,4 +1,5 @@
+ // SPDX-License-Identifier: GPL-2.0
++#define _GNU_SOURCE
+ #include <test_progs.h>
+ #include <network_helpers.h>
+ #include "kfree_skb.skel.h"
+diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h b/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h
+index 61333f2a03f91f..68c08309dbc823 100644
+--- a/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h
++++ b/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h
+@@ -27,8 +27,6 @@
+ 	} \
+ 	})
+
+-#define NETNS "ns_lwt"
+-
+ static inline int netns_create(void)
+ {
+ 	return system("ip netns add " NETNS);
+diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c
+index 2bc932a18c17e5..7b458ae5f0f856 100644
+--- a/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c
++++ b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c
+@@ -47,13 +47,13 @@
+ #include
+ #include
+ #include
+-#include
+ #include
+ #include
+ #include
+ #include
+ #include
+
++#define NETNS "ns_lwt_redirect"
+ #include "lwt_helpers.h"
+ #include "test_progs.h"
+ #include "network_helpers.h"
+diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c b/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c
+index f4bb2d5fcae0a0..920ee3042d0934 100644
+--- a/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c
++++ b/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c
+@@ -48,6 +48,8 @@
+  * For case 2, force UDP packets to overflow fq limit. As long as kernel
+  * is not crashed, it is considered successful.
+  */
++#define NETNS "ns_lwt_reroute"
++#include
+ #include "lwt_helpers.h"
+ #include "network_helpers.h"
+ #include
+diff --git a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
+index 24d493482ffc75..2c57ceede095eb 100644
+--- a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
++++ b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
+@@ -11,78 +11,168 @@
+ #include
+ #include
+ #include
+-#include
++#include
++#include "network_helpers.h"
+
+ #define STACK_SIZE (1024 * 1024)
+ static char child_stack[STACK_SIZE];
+
+-static int test_current_pid_tgid(void *args)
++static int get_pid_tgid(pid_t *pid, pid_t *tgid,
++			struct test_ns_current_pid_tgid__bss *bss)
+ {
+-	struct test_ns_current_pid_tgid__bss *bss;
+-	struct test_ns_current_pid_tgid *skel;
+-	int err = -1, duration = 0;
+-	pid_t tgid, pid;
+ 	struct stat st;
++	int err;
+
+-	skel = test_ns_current_pid_tgid__open_and_load();
+-	if (CHECK(!skel, "skel_open_load", "failed to load skeleton\n"))
+-		goto cleanup;
+-
+-	pid = syscall(SYS_gettid);
+-	tgid = getpid();
++	*pid = syscall(SYS_gettid);
++	*tgid = getpid();
+
+ 	err = stat("/proc/self/ns/pid", &st);
+-	if (CHECK(err, "stat", "failed /proc/self/ns/pid: %d\n", err))
+-		goto cleanup;
++	if (!ASSERT_OK(err, "stat /proc/self/ns/pid"))
++		return err;
+
+-	bss = skel->bss;
+ 	bss->dev = st.st_dev;
+ 	bss->ino = st.st_ino;
+ 	bss->user_pid = 0;
+ 	bss->user_tgid = 0;
++	return 0;
++}
++
++static int test_current_pid_tgid_tp(void *args)
++{
++	struct test_ns_current_pid_tgid__bss *bss;
++	struct test_ns_current_pid_tgid *skel;
++	int ret = -1, err;
++	pid_t tgid, pid;
++
++	skel = test_ns_current_pid_tgid__open();
++	if (!ASSERT_OK_PTR(skel, "test_ns_current_pid_tgid__open"))
++		return ret;
++
++	bpf_program__set_autoload(skel->progs.tp_handler, true);
++
++	err = test_ns_current_pid_tgid__load(skel);
++	if (!ASSERT_OK(err, "test_ns_current_pid_tgid__load"))
++		goto cleanup;
++
++	bss = skel->bss;
++	if (get_pid_tgid(&pid, &tgid, bss))
++		goto cleanup;
+
+ 	err = test_ns_current_pid_tgid__attach(skel);
+-	if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
++	if (!ASSERT_OK(err, "test_ns_current_pid_tgid__attach"))
+ 		goto cleanup;
+
+ 	/* trigger tracepoint */
+ 	usleep(1);
+-	ASSERT_EQ(bss->user_pid, pid, "pid");
+-	ASSERT_EQ(bss->user_tgid, tgid, "tgid");
+-	err = 0;
++	if (!ASSERT_EQ(bss->user_pid, pid, "pid"))
++		goto cleanup;
++	if (!ASSERT_EQ(bss->user_tgid, tgid, "tgid"))
++		goto cleanup;
++	ret = 0;
+
+ cleanup:
+-	test_ns_current_pid_tgid__destroy(skel);
++	test_ns_current_pid_tgid__destroy(skel);
++	return ret;
++}
+
+-	return err;
++static int test_current_pid_tgid_cgrp(void *args)
++{
++	struct test_ns_current_pid_tgid__bss *bss;
++	struct test_ns_current_pid_tgid *skel;
++	int server_fd = -1, ret = -1, err;
++	int cgroup_fd = *(int *)args;
++	pid_t tgid, pid;
++
++	skel = test_ns_current_pid_tgid__open();
++	if (!ASSERT_OK_PTR(skel, "test_ns_current_pid_tgid__open"))
++		return ret;
++
++	bpf_program__set_autoload(skel->progs.cgroup_bind4, true);
++
++	err = test_ns_current_pid_tgid__load(skel);
++	if (!ASSERT_OK(err, "test_ns_current_pid_tgid__load"))
++		goto cleanup;
++
++	bss = skel->bss;
++	if (get_pid_tgid(&pid, &tgid, bss))
++		goto cleanup;
++
++	skel->links.cgroup_bind4 = bpf_program__attach_cgroup(
++		skel->progs.cgroup_bind4, cgroup_fd);
++	if (!ASSERT_OK_PTR(skel->links.cgroup_bind4, "bpf_program__attach_cgroup"))
++		goto cleanup;
++
++	server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0);
++	if (!ASSERT_GE(server_fd, 0, "start_server"))
++		goto cleanup;
++
++	if (!ASSERT_EQ(bss->user_pid, pid, "pid"))
++		goto cleanup;
++	if (!ASSERT_EQ(bss->user_tgid, tgid, "tgid"))
++		goto cleanup;
++	ret = 0;
++
++cleanup:
++	if (server_fd >= 0)
++		close(server_fd);
++	test_ns_current_pid_tgid__destroy(skel);
++	return ret;
+ }
+
+-static void test_ns_current_pid_tgid_new_ns(void)
++static void test_ns_current_pid_tgid_new_ns(int (*fn)(void *), void *arg)
+ {
+-	int wstatus, duration = 0;
++	int wstatus;
+ 	pid_t cpid;
+
+ 	/* Create a process in a new namespace, this process
+ 	 * will be the init process of this new namespace hence will be pid 1.
+ 	 */
+-	cpid = clone(test_current_pid_tgid, child_stack + STACK_SIZE,
+-		     CLONE_NEWPID | SIGCHLD, NULL);
++	cpid = clone(fn, child_stack + STACK_SIZE,
++		     CLONE_NEWPID | SIGCHLD, arg);
+
+-	if (CHECK(cpid == -1, "clone", "%s\n", strerror(errno)))
++	if (!ASSERT_NEQ(cpid, -1, "clone"))
+ 		return;
+
+-	if (CHECK(waitpid(cpid, &wstatus, 0) == -1, "waitpid", "%s\n", strerror(errno)))
++	if (!ASSERT_NEQ(waitpid(cpid, &wstatus, 0), -1, "waitpid"))
+ 		return;
+
+-	if (CHECK(WEXITSTATUS(wstatus) != 0, "newns_pidtgid", "failed"))
++	if (!ASSERT_OK(WEXITSTATUS(wstatus), "newns_pidtgid"))
+ 		return;
+ }
+
++static void test_in_netns(int (*fn)(void *), void *arg)
++{
++	struct nstoken *nstoken = NULL;
++
++	SYS(cleanup, "ip netns add ns_current_pid_tgid");
++	SYS(cleanup, "ip -net ns_current_pid_tgid link set dev lo up");
++
++	nstoken = open_netns("ns_current_pid_tgid");
++	if (!ASSERT_OK_PTR(nstoken, "open_netns"))
++		goto cleanup;
++
++	test_ns_current_pid_tgid_new_ns(fn, arg);
++
++cleanup:
++	if (nstoken)
++		close_netns(nstoken);
++	SYS_NOFAIL("ip netns del ns_current_pid_tgid");
++}
++
+ /* TODO: use a different tracepoint */
+ void serial_test_ns_current_pid_tgid(void)
+ {
+-	if (test__start_subtest("ns_current_pid_tgid_root_ns"))
+-		test_current_pid_tgid(NULL);
+-	if (test__start_subtest("ns_current_pid_tgid_new_ns"))
+-		test_ns_current_pid_tgid_new_ns();
++	if (test__start_subtest("root_ns_tp"))
++		test_current_pid_tgid_tp(NULL);
++	if (test__start_subtest("new_ns_tp"))
++		test_ns_current_pid_tgid_new_ns(test_current_pid_tgid_tp, NULL);
++	if (test__start_subtest("new_ns_cgrp")) {
++		int cgroup_fd = -1;
++
++		cgroup_fd = test__join_cgroup("/sock_addr");
++		if (ASSERT_GE(cgroup_fd, 0, "join_cgroup")) {
++			test_in_netns(test_current_pid_tgid_cgrp, &cgroup_fd);
++			close(cgroup_fd);
++		}
++	}
+ }
+diff --git a/tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c b/tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c
+index daa952711d8fdf..e9c07d561ded6d 100644
+--- a/tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c
++++ b/tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+
++#define _GNU_SOURCE
+ #include <test_progs.h>
+ #include <network_helpers.h>
+ #include "test_parse_tcp_hdr_opt.skel.h"
+diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
+index de2466547efe0f..a1ab0af004549b 100644
+--- a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
++++ b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
+@@ -18,7 +18,6 @@
+ #include
+ #include
+ #include
+-#include
+ #include
+ #include
+ #include
+diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
+index af3c31f82a8ae2..5a640173358754 100644
+--- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
++++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
+@@ -421,7 +421,7 @@ static int set_forwarding(bool enable)
+
+ static int __rcv_tstamp(int fd, const char *expected, size_t s, __u64 *tstamp)
+ {
+-	struct __kernel_timespec pkt_ts = {};
++	struct timespec pkt_ts = {};
+ 	char ctl[CMSG_SPACE(sizeof(pkt_ts))];
+ 	struct timespec now_ts;
+ 	struct msghdr msg = {};
+@@ -445,7 +445,7 @@ static int __rcv_tstamp(int fd, const char *expected, size_t s, __u64 *tstamp)
+
+ 	cmsg = CMSG_FIRSTHDR(&msg);
+ 	if (cmsg && cmsg->cmsg_level == SOL_SOCKET &&
+-	    cmsg->cmsg_type == SO_TIMESTAMPNS_NEW)
++	    cmsg->cmsg_type == SO_TIMESTAMPNS)
+ 		memcpy(&pkt_ts, CMSG_DATA(cmsg), sizeof(pkt_ts));
+
+ 	pkt_ns = pkt_ts.tv_sec * NSEC_PER_SEC + pkt_ts.tv_nsec;
+@@ -487,9 +487,9 @@ static int wait_netstamp_needed_key(void)
+ 	if (!ASSERT_GE(srv_fd, 0, "start_server"))
+ 		goto done;
+
+-	err = setsockopt(srv_fd, SOL_SOCKET, SO_TIMESTAMPNS_NEW,
++	err = setsockopt(srv_fd, SOL_SOCKET, SO_TIMESTAMPNS,
+ 			 &opt, sizeof(opt));
+-	if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS_NEW)"))
++	if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS)"))
+ 		goto done;
+
+ 	cli_fd = connect_to_fd(srv_fd, TIMEOUT_MILLIS);
+@@ -571,9 +571,9 @@ static void test_inet_dtime(int family, int type, const char *addr, __u16 port)
+ 		return;
+
+ 	/* Ensure the kernel puts the (rcv) timestamp for all skb */
+-	err = setsockopt(listen_fd, SOL_SOCKET, SO_TIMESTAMPNS_NEW,
++	err = setsockopt(listen_fd, SOL_SOCKET, SO_TIMESTAMPNS,
+ 			 &opt, sizeof(opt));
+-	if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS_NEW)"))
++	if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS)"))
+ 		goto done;
+
+ 	if (type == SOCK_STREAM) {
+diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c
+index 8fe84da1b9b49b..6a2da7a64419ae 100644
+--- a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c
++++ b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c
+@@ -1,4 +1,5 @@
+ // SPDX-License-Identifier: GPL-2.0
++#define _GNU_SOURCE
+ #include <test_progs.h>
+ #include "cgroup_helpers.h"
+ #include "network_helpers.h"
+diff --git a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c
+index e51721df14fc19..dfff6feac12c3c 100644
+--- a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c
++++ b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c
+@@ -4,6 +4,7 @@
+ #define _GNU_SOURCE
+ #include
+ #include
++#include
+ #include
+ #include
+ #include
+diff --git a/tools/testing/selftests/bpf/progs/cg_storage_multi.h b/tools/testing/selftests/bpf/progs/cg_storage_multi.h
+index a0778fe7857a14..41d59f0ee606c7 100644
+--- a/tools/testing/selftests/bpf/progs/cg_storage_multi.h
++++ b/tools/testing/selftests/bpf/progs/cg_storage_multi.h
+@@ -3,8 +3,6 @@
+ #ifndef __PROGS_CG_STORAGE_MULTI_H
+ #define __PROGS_CG_STORAGE_MULTI_H
+
+-#include
+-
+ struct cgroup_value {
+ 	__u32 egress_pkts;
+ 	__u32 ingress_pkts;
+diff --git a/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c b/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c
+index f5ac5f3e89196f..568816307f7125 100644
+--- a/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c
++++ b/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c
+@@ -31,6 +31,7 @@ int BPF_PROG(check_access, struct bpf_map *map, fmode_t fmode)
+
+ 	if (fmode & FMODE_WRITE)
+ 		return -EACCES;
++	barrier();
+
+ 	return 0;
+ }
+diff --git a/tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c b/tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c
+index 0763d49f9c4213..d0010e698f6688 100644
+--- a/tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c
++++ b/tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c
+@@ -10,18 +10,29 @@ __u64 user_tgid = 0;
+ __u64 dev = 0;
+ __u64 ino = 0;
+
+-SEC("tracepoint/syscalls/sys_enter_nanosleep")
+-int handler(const void *ctx)
++static void get_pid_tgid(void)
+ {
+ 	struct bpf_pidns_info nsdata;
+
+ 	if (bpf_get_ns_current_pid_tgid(dev, ino, &nsdata, sizeof(struct bpf_pidns_info)))
+-		return 0;
++		return;
+
+ 	user_pid = nsdata.pid;
+ 	user_tgid = nsdata.tgid;
++}
+
++SEC("?tracepoint/syscalls/sys_enter_nanosleep")
++int tp_handler(const void *ctx)
++{
++	get_pid_tgid();
+ 	return 0;
+ }
+
++SEC("?cgroup/bind4")
++int cgroup_bind4(struct bpf_sock_addr *ctx)
++{
++	get_pid_tgid();
++	return 1;
++}
++
+ char _license[] SEC("license") = "GPL";
+diff --git a/tools/testing/selftests/bpf/test_cpp.cpp b/tools/testing/selftests/bpf/test_cpp.cpp
+index f4936834f76f46..435341c2542085 100644
+--- a/tools/testing/selftests/bpf/test_cpp.cpp
++++ b/tools/testing/selftests/bpf/test_cpp.cpp
+@@ -6,6 +6,10 @@
+ #include
+ #include
+ #include
++
++#ifndef _Bool
++#define _Bool bool
++#endif
+ #include "test_core_extern.skel.h"
+
+ template <typename T>
+diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c
+index 4d0650cfb5cd8b..fda7589c50236c 100644
+--- a/tools/testing/selftests/bpf/test_lru_map.c
++++ b/tools/testing/selftests/bpf/test_lru_map.c
+@@ -126,7 +126,8 @@ static int sched_next_online(int pid, int *next_to_try)
+
+ 	while (next < nr_cpus) {
+ 		CPU_ZERO(&cpuset);
+-		CPU_SET(next++, &cpuset);
++		CPU_SET(next, &cpuset);
++		next++;
+ 		if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset)) {
+ 			ret = 0;
+ 			break;
+diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
+index 4d582cac2c09e1..74620ed3a166e4 100644
+--- a/tools/testing/selftests/bpf/test_progs.c
++++ b/tools/testing/selftests/bpf/test_progs.c
+@@ -10,7 +10,6 @@
+ #include
+ #include
+ #include
+-#include <execinfo.h> /* backtrace */
+ #include <sys/sysinfo.h> /* get_nprocs */
+ #include
+ #include
+@@ -19,6 +18,21 @@
+ #include
+ #include "json_writer.h"
+
++#ifdef __GLIBC__
++#include <execinfo.h> /* backtrace */
++#endif
++
++/* Default backtrace funcs if missing at link */
++__weak int backtrace(void **buffer, int size)
++{
++	return 0;
++}
++
++__weak void backtrace_symbols_fd(void *const *buffer, int size, int fd)
++{
++	dprintf(fd, "\n");
++}
++
+ static bool verbose(void)
+ {
+ 	return env.verbosity > VERBOSE_NONE;
+@@ -1690,7 +1704,7 @@ int main(int argc, char **argv)
+ 	/* launch workers if requested */
+ 	env.worker_id = -1; /* main process */
+ 	if (env.workers) {
+-		env.worker_pids = calloc(sizeof(__pid_t), env.workers);
++		env.worker_pids = calloc(sizeof(pid_t), env.workers);
+ 		env.worker_socks = calloc(sizeof(int), env.workers);
+ 		if (env.debug)
+ 			fprintf(stdout, "Launching %d workers.\n", env.workers);
+diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c
+index 8d994884c7b440..6acffe0426f016 100644
+--- a/tools/testing/selftests/bpf/testing_helpers.c
++++ b/tools/testing/selftests/bpf/testing_helpers.c
+@@ -220,13 +220,13 @@ int parse_test_list(const char *s,
+ 		    bool is_glob_pattern)
+ {
+ 	char *input, *state = NULL, *test_spec;
+-	int err = 0;
++	int err = 0, cnt = 0;
+
+ 	input = strdup(s);
+ 	if (!input)
+ 		return -ENOMEM;
+
+-	while ((test_spec = strtok_r(state ? NULL : input, ",", &state))) {
++	while ((test_spec = strtok_r(cnt++ ? NULL : input, ",", &state))) {
+ 		err = insert_test(set, test_spec, is_glob_pattern);
+ 		if (err)
+ 			break;
+diff --git a/tools/testing/selftests/bpf/unpriv_helpers.c b/tools/testing/selftests/bpf/unpriv_helpers.c
+index 2a6efbd0401e5b..762e4b5ec95571 100644
+--- a/tools/testing/selftests/bpf/unpriv_helpers.c
++++ b/tools/testing/selftests/bpf/unpriv_helpers.c
+@@ -2,7 +2,6 @@
+
+ #include
+ #include
+-#include
+ #include
+
+ #include "unpriv_helpers.h"
+diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c
+index 0ad98b6a8e6ef8..611b5a0a6f7e3e 100644
+--- a/tools/testing/selftests/bpf/veristat.c
++++ b/tools/testing/selftests/bpf/veristat.c
+@@ -753,13 +753,13 @@ static int parse_stat(const char *stat_name, struct stat_specs *specs)
+ static int parse_stats(const char *stats_str, struct stat_specs *specs)
+ {
+ 	char *input, *state = NULL, *next;
+-	int err;
++	int err, cnt = 0;
+
+ 	input = strdup(stats_str);
+ 	if (!input)
+ 		return -ENOMEM;
+
+-	while ((next = strtok_r(state ? NULL : input, ",", &state))) {
++	while ((next = strtok_r(cnt++ ? NULL : input, ",", &state))) {
+ 		err = parse_stat(next, specs);
+ 		if (err)
+ 			return err;
+@@ -1444,7 +1444,7 @@ static int parse_stats_csv(const char *filename, struct stat_specs *specs,
+ 	while (fgets(line, sizeof(line), f)) {
+ 		char *input = line, *state = NULL, *next;
+ 		struct verif_stats *st = NULL;
+-		int col = 0;
++		int col = 0, cnt = 0;
+
+ 		if (!header) {
+ 			void *tmp;
+@@ -1462,7 +1462,7 @@ static int parse_stats_csv(const char *filename, struct stat_specs *specs,
+ 			*stat_cntp += 1;
+ 		}
+
+-		while ((next = strtok_r(state ? NULL : input, ",\n", &state))) {
++		while ((next = strtok_r(cnt++ ? NULL : input, ",\n", &state))) {
+ 			if (header) {
+ 				/* for the first line, set up spec stats */
+ 				err = parse_stat(next, specs);
+diff --git a/tools/testing/selftests/bpf/xdp_hw_metadata.c b/tools/testing/selftests/bpf/xdp_hw_metadata.c
+index adb77c1a6a7404..79f2da8f6ead63 100644
+--- a/tools/testing/selftests/bpf/xdp_hw_metadata.c
++++ b/tools/testing/selftests/bpf/xdp_hw_metadata.c
+@@ -288,20 +288,6 @@ static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd, clockid_t
+ 	return 0;
+ }
+
+-struct ethtool_channels {
+-	__u32 cmd;
+-	__u32 max_rx;
+-	__u32 max_tx;
+-	__u32 max_other;
+-	__u32 max_combined;
+-	__u32 rx_count;
+-	__u32 tx_count;
+-	__u32 other_count;
+-	__u32 combined_count;
+-};
+-
+-#define ETHTOOL_GCHANNELS 0x0000003c /* Get no of channels */
+-
+ static int rxq_num(const char *ifname)
+ {
+ 	struct ethtool_channels ch = {
+diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc
+index ff7499eb98d6d7..ce5d2e62731f38 100644
+--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc
++++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc
+@@ -1,7 +1,7 @@
+ #!/bin/sh
+ # SPDX-License-Identifier: GPL-2.0
+ # description: Kprobe event char type argument
+-# requires: kprobe_events
++# requires: kprobe_events available_filter_functions
+
+ case `uname -m` in
+ x86_64)
+diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
+index a202b2ea4baf98..4f72c2875f6b9c 100644
+--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
++++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
+@@ -1,7 +1,7 @@
+ #!/bin/sh
+ # SPDX-License-Identifier: GPL-2.0
+ # description: Kprobe event string type argument
+-# requires: kprobe_events
++# requires: kprobe_events available_filter_functions
+
+ case `uname -m` in
+ x86_64)
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 0af1546cc223ab..44c228bcd699d9 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -5174,6 +5174,7 @@ __visible bool kvm_rebooting;
+ EXPORT_SYMBOL_GPL(kvm_rebooting);
+
+ static DEFINE_PER_CPU(bool, hardware_enabled);
++static DEFINE_MUTEX(kvm_usage_lock);
+ static int kvm_usage_count;
+
+ static int __hardware_enable_nolock(void)
+@@ -5206,10 +5207,10 @@ static int kvm_online_cpu(unsigned int cpu)
+ 	 * be enabled. Otherwise running VMs would encounter unrecoverable
+ 	 * errors when scheduled to this CPU.
+ 	 */
+-	mutex_lock(&kvm_lock);
++	mutex_lock(&kvm_usage_lock);
+ 	if (kvm_usage_count)
+ 		ret = __hardware_enable_nolock();
+-	mutex_unlock(&kvm_lock);
++	mutex_unlock(&kvm_usage_lock);
+ 	return ret;
+ }
+
+@@ -5229,10 +5230,10 @@ static void hardware_disable_nolock(void *junk)
+
+ static int kvm_offline_cpu(unsigned int cpu)
+ {
+-	mutex_lock(&kvm_lock);
++	mutex_lock(&kvm_usage_lock);
+ 	if (kvm_usage_count)
+ 		hardware_disable_nolock(NULL);
+-	mutex_unlock(&kvm_lock);
++	mutex_unlock(&kvm_usage_lock);
+ 	return 0;
+ }
+
+@@ -5248,9 +5249,9 @@ static void hardware_disable_all_nolock(void)
+ static void hardware_disable_all(void)
+ {
+ 	cpus_read_lock();
+-	mutex_lock(&kvm_lock);
++	mutex_lock(&kvm_usage_lock);
+ 	hardware_disable_all_nolock();
+-	mutex_unlock(&kvm_lock);
++	mutex_unlock(&kvm_usage_lock);
+ 	cpus_read_unlock();
+ }
+
+@@ -5281,7 +5282,7 @@ static int hardware_enable_all(void)
+ 	 * enable hardware multiple times.
+ 	 */
+ 	cpus_read_lock();
+-	mutex_lock(&kvm_lock);
++	mutex_lock(&kvm_usage_lock);
+
+ 	r = 0;
+
+@@ -5295,7 +5296,7 @@ static int hardware_enable_all(void)
+ 		}
+ 	}
+
+-	mutex_unlock(&kvm_lock);
++	mutex_unlock(&kvm_usage_lock);
+ 	cpus_read_unlock();
+
+ 	return r;
+@@ -5323,13 +5324,13 @@ static int kvm_suspend(void)
+ {
+ 	/*
+ 	 * Secondary CPUs and CPU hotplug are disabled across the suspend/resume
+-	 * callbacks, i.e. no need to acquire kvm_lock to ensure the usage count
+-	 * is stable. Assert that kvm_lock is not held to ensure the system
+-	 * isn't suspended while KVM is enabling hardware. Hardware enabling
+-	 * can be preempted, but the task cannot be frozen until it has dropped
+-	 * all locks (userspace tasks are frozen via a fake signal).
++	 * callbacks, i.e. no need to acquire kvm_usage_lock to ensure the usage
++	 * count is stable. Assert that kvm_usage_lock is not held to ensure
++	 * the system isn't suspended while KVM is enabling hardware. Hardware
++	 * enabling can be preempted, but the task cannot be frozen until it has
++	 * dropped all locks (userspace tasks are frozen via a fake signal).
+ 	 */
+-	lockdep_assert_not_held(&kvm_lock);
++	lockdep_assert_not_held(&kvm_usage_lock);
+ 	lockdep_assert_irqs_disabled();
+
+ 	if (kvm_usage_count)
+@@ -5339,7 +5340,7 @@ static int kvm_suspend(void)
+
+ static void kvm_resume(void)
+ {
+-	lockdep_assert_not_held(&kvm_lock);
++	lockdep_assert_not_held(&kvm_usage_lock);
+ 	lockdep_assert_irqs_disabled();
+
+ 	if (kvm_usage_count)