I'm announcing the release of the 6.4.12 kernel.
All users of the 6.4 kernel series must upgrade.
The updated 6.4.y git tree can be found at: git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git linux-6.4.y and can be browsed at the normal kernel.org git web browser: https://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git%3Ba=summa...
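If you just want to pick this release up with git, something like the following should work (this assumes a fresh clone of the stable tree at the URL above; adjust paths as needed):

  $ git clone git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git
  $ cd linux-stable
  $ git checkout linux-6.4.y
  $ git describe        # should report v6.4.12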
thanks,
greg k-h
------------
Documentation/admin-guide/hw-vuln/srso.rst | 4 Documentation/admin-guide/kernel-parameters.txt | 1 Documentation/devicetree/bindings/input/goodix,gt7375p.yaml | 9 Documentation/devicetree/bindings/pinctrl/qcom,sa8775p-tlmm.yaml | 2 Documentation/networking/nf_conntrack-sysctl.rst | 4 Makefile | 2 arch/arm/boot/dts/imx23.dtsi | 2 arch/arm/boot/dts/imx28.dtsi | 2 arch/arm/boot/dts/imx6dl-prtrvt.dts | 4 arch/arm/boot/dts/imx6qdl-phytec-mira.dtsi | 2 arch/arm/boot/dts/imx6qdl-prti6q.dtsi | 11 arch/arm/boot/dts/imx6qdl.dtsi | 2 arch/arm/boot/dts/imx6sx.dtsi | 8 arch/arm/boot/dts/imx6ul.dtsi | 2 arch/arm/boot/dts/imx7s.dtsi | 8 arch/arm64/boot/dts/freescale/imx8mm.dtsi | 7 arch/arm64/boot/dts/freescale/imx93.dtsi | 2 arch/arm64/boot/dts/qcom/ipq5332.dtsi | 7 arch/arm64/boot/dts/qcom/qrb5165-rb5.dts | 2 arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts | 3 arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi | 6 arch/arm64/include/asm/fpsimd.h | 4 arch/arm64/kernel/fpsimd.c | 6 arch/arm64/kernel/ptrace.c | 20 + arch/arm64/kernel/signal.c | 2 arch/parisc/kernel/entry.S | 47 +-- arch/powerpc/kernel/rtas_flash.c | 6 arch/powerpc/mm/kasan/Makefile | 1 arch/riscv/include/asm/insn.h | 15 - arch/riscv/kernel/traps.c | 6 arch/riscv/lib/uaccess.S | 11 arch/um/os-Linux/user_syms.c | 7 arch/x86/include/asm/entry-common.h | 1 arch/x86/include/asm/nospec-branch.h | 49 +-- arch/x86/kernel/alternative.c | 4 arch/x86/kernel/cpu/amd.c | 1 arch/x86/kernel/cpu/bugs.c | 29 +- arch/x86/kernel/kprobes/opt.c | 40 +- arch/x86/kernel/static_call.c | 13 arch/x86/kernel/traps.c | 2 arch/x86/kernel/vmlinux.lds.S | 22 - arch/x86/kvm/svm/svm.c | 2 arch/x86/kvm/svm/vmenter.S | 7 arch/x86/lib/retpoline.S | 137 ++++++---- block/blk-cgroup.c | 2 block/blk-crypto-fallback.c | 36 +- drivers/accel/habanalabs/common/device.c | 15 + drivers/accel/habanalabs/common/habanalabs.h | 2 drivers/accel/habanalabs/common/habanalabs_drv.c | 4 drivers/accel/qaic/qaic_control.c | 26 + drivers/accel/qaic/qaic_data.c | 1 drivers/bluetooth/btusb.c | 3 drivers/bus/ti-sysc.c | 2 drivers/firewire/net.c | 6 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 8 drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c | 38 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h | 3 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 41 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 9 drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 1 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 10 drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 4 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 2 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 6 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 5 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 27 + drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 2 drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c | 8 drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c | 5 drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c | 20 + drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 24 + drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 4 drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c | 4 drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h | 2 drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 1 drivers/gpu/drm/amd/include/amd_shared.h | 1 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 4 drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 14 - drivers/gpu/drm/drm_edid.c | 29 -- drivers/gpu/drm/i915/display/intel_sdvo.c | 2 drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c | 22 + 
drivers/gpu/drm/nouveau/nouveau_connector.c | 11 drivers/gpu/drm/panel/panel-simple.c | 24 - drivers/gpu/drm/qxl/qxl_drv.h | 2 drivers/gpu/drm/qxl/qxl_dumb.c | 5 drivers/gpu/drm/qxl/qxl_gem.c | 25 + drivers/gpu/drm/qxl/qxl_ioctl.c | 6 drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 37 -- drivers/gpu/drm/rcar-du/rcar_du_drv.c | 48 --- drivers/gpu/drm/rcar-du/rcar_du_drv.h | 2 drivers/gpu/drm/rcar-du/rcar_du_regs.h | 3 drivers/gpu/drm/scheduler/sched_entity.c | 6 drivers/gpu/drm/scheduler/sched_main.c | 4 drivers/gpu/drm/stm/ltdc.c | 4 drivers/hid/hid-logitech-hidpp.c | 4 drivers/hid/i2c-hid/i2c-hid-of-goodix.c | 16 + drivers/hid/intel-ish-hid/ipc/hw-ish.h | 1 drivers/hid/intel-ish-hid/ipc/pci-ish.c | 1 drivers/i2c/busses/i2c-bcm-iproc.c | 11 drivers/i2c/busses/i2c-designware-master.c | 16 + drivers/i2c/busses/i2c-hisi.c | 8 drivers/i2c/busses/i2c-tegra.c | 2 drivers/infiniband/hw/bnxt_re/bnxt_re.h | 2 drivers/infiniband/hw/bnxt_re/ib_verbs.c | 16 - drivers/infiniband/hw/bnxt_re/qplib_sp.c | 8 drivers/infiniband/hw/bnxt_re/qplib_sp.h | 4 drivers/infiniband/hw/mana/qp.c | 5 drivers/infiniband/hw/mlx5/qpc.c | 10 drivers/iommu/amd/amd_iommu_types.h | 4 drivers/iommu/amd/init.c | 36 ++ drivers/leds/rgb/leds-qcom-lpg.c | 8 drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c | 4 drivers/media/platform/mediatek/vpu/mtk_vpu.c | 6 drivers/media/platform/qcom/camss/camss-vfe.c | 6 drivers/media/usb/uvc/uvc_v4l2.c | 2 drivers/mmc/core/block.c | 7 drivers/mmc/host/sdhci_f_sdh30.c | 11 drivers/mmc/host/sunplus-mmc.c | 26 - drivers/mmc/host/wbsd.c | 2 drivers/net/dsa/mv88e6xxx/chip.c | 8 drivers/net/ethernet/cadence/macb_main.c | 9 drivers/net/ethernet/intel/i40e/i40e_nvm.c | 16 - drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 10 drivers/net/ethernet/intel/iavf/iavf_fdir.c | 77 +++++ drivers/net/ethernet/intel/iavf/iavf_fdir.h | 2 drivers/net/ethernet/intel/ice/ice_eswitch.c | 6 drivers/net/ethernet/intel/ice/ice_main.c | 5 drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c | 2 drivers/net/ethernet/marvell/octeon_ep/octep_main.c | 9 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h | 2 drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 8 drivers/net/ethernet/microsoft/mana/mana_en.c | 5 drivers/net/ethernet/qlogic/qede/qede_main.c | 10 drivers/net/ethernet/sfc/ef100_nic.c | 2 drivers/net/ethernet/sfc/tc.c | 70 +++++ drivers/net/ethernet/sfc/tc.h | 9 drivers/net/pcs/pcs-rzn1-miic.c | 10 drivers/net/phy/at803x.c | 89 ++---- drivers/net/phy/broadcom.c | 13 drivers/net/phy/phy_device.c | 13 drivers/net/team/team.c | 4 drivers/net/veth.c | 3 drivers/net/virtio_net.c | 6 drivers/pci/controller/dwc/pcie-tegra194.c | 13 drivers/pcmcia/rsrc_nonstatic.c | 2 drivers/pinctrl/qcom/pinctrl-msm.c | 9 drivers/pinctrl/qcom/pinctrl-msm.h | 2 drivers/pinctrl/qcom/pinctrl-sa8775p.c | 1 drivers/regulator/da9063-regulator.c | 12 drivers/regulator/qcom-rpmh-regulator.c | 2 drivers/soc/aspeed/aspeed-socinfo.c | 1 drivers/soc/aspeed/aspeed-uart-routing.c | 2 drivers/thunderbolt/nhi.c | 2 drivers/thunderbolt/nhi.h | 4 drivers/thunderbolt/quirks.c | 8 drivers/thunderbolt/retimer.c | 29 +- drivers/tty/n_gsm.c | 3 drivers/tty/serial/8250/8250_port.c | 1 drivers/tty/serial/fsl_lpuart.c | 4 drivers/tty/serial/stm32-usart.c | 5 drivers/usb/chipidea/ci_hdrc_imx.c | 5 drivers/usb/chipidea/usbmisc_imx.c | 37 ++ drivers/usb/gadget/function/u_serial.c | 3 drivers/usb/gadget/function/uvc_video.c | 32 +- drivers/usb/host/xhci-histb.c | 12 drivers/usb/host/xhci-mtk.c | 6 drivers/usb/host/xhci-pci.c | 7 drivers/usb/host/xhci-plat.c | 7 
drivers/usb/host/xhci-tegra.c | 1 drivers/usb/host/xhci.h | 2 drivers/vdpa/mlx5/core/mlx5_vdpa.h | 2 drivers/vdpa/mlx5/core/mr.c | 97 +++++-- drivers/vdpa/mlx5/net/mlx5_vnet.c | 4 drivers/vdpa/vdpa.c | 9 drivers/vdpa/vdpa_user/vduse_dev.c | 8 drivers/video/fbdev/mmp/hw/mmp_ctrl.c | 4 drivers/virtio/virtio_mmio.c | 5 drivers/virtio/virtio_vdpa.c | 2 drivers/watchdog/sp5100_tco.c | 4 fs/btrfs/block-group.c | 12 fs/btrfs/block-group.h | 5 fs/btrfs/ctree.h | 1 fs/btrfs/delayed-inode.c | 5 fs/btrfs/delayed-inode.h | 1 fs/btrfs/extent_io.c | 25 + fs/btrfs/extent_map.c | 6 fs/btrfs/inode.c | 131 +++++---- fs/btrfs/scrub.c | 3 fs/btrfs/volumes.c | 3 fs/ceph/mds_client.c | 4 fs/gfs2/super.c | 26 + fs/netfs/iterator.c | 2 fs/ntfs3/frecord.c | 16 - fs/ntfs3/fsntfs.c | 2 fs/ntfs3/index.c | 6 fs/ntfs3/ntfs_fs.h | 2 fs/ntfs3/record.c | 10 fs/ntfs3/super.c | 98 +++++-- fs/ntfs3/xattr.c | 1 fs/smb/client/cifs_debug.c | 10 fs/smb/client/cifsfs.c | 28 -- fs/smb/client/file.c | 25 + fs/smb/client/fs_context.c | 4 fs/smb/client/smb2pdu.c | 6 include/drm/drm_edid.h | 12 include/linux/iopoll.h | 2 include/linux/virtio_net.h | 4 include/media/v4l2-mem2mem.h | 18 + include/net/mana/mana.h | 4 include/net/sock.h | 6 include/net/xfrm.h | 1 kernel/dma/remap.c | 4 kernel/trace/ring_buffer.c | 14 - kernel/trace/trace.c | 3 net/bluetooth/l2cap_core.c | 5 net/bluetooth/mgmt.c | 2 net/core/sock.c | 2 net/ipv4/ip_vti.c | 4 net/ipv4/tcp_timer.c | 4 net/ipv6/ip6_vti.c | 4 net/key/af_key.c | 4 net/netfilter/ipvs/ip_vs_ctl.c | 4 net/netfilter/nf_conntrack_proto_sctp.c | 6 net/netfilter/nf_tables_api.c | 44 ++- net/netfilter/nft_dynset.c | 3 net/netfilter/nft_set_pipapo.c | 36 +- net/openvswitch/datapath.c | 8 net/unix/af_unix.c | 9 net/xfrm/xfrm_compat.c | 2 net/xfrm/xfrm_input.c | 22 - net/xfrm/xfrm_interface_core.c | 4 net/xfrm/xfrm_state.c | 8 net/xfrm/xfrm_user.c | 15 - rust/macros/vtable.rs | 1 sound/hda/hdac_regmap.c | 7 sound/pci/hda/patch_realtek.c | 58 +++- sound/soc/amd/Kconfig | 1 sound/soc/amd/vangogh/acp5x.h | 2 sound/soc/amd/vangogh/pci-acp5x.c | 7 sound/soc/codecs/cs35l56.c | 12 sound/soc/codecs/max98363.c | 9 sound/soc/codecs/rt5665.c | 2 sound/soc/intel/boards/sof_sdw.c | 35 ++ sound/soc/intel/boards/sof_sdw_rt711_sdca.c | 3 sound/soc/meson/axg-tdm-formatter.c | 42 +-- sound/soc/sof/amd/acp.h | 3 sound/soc/sof/amd/pci-rmb.c | 3 sound/soc/sof/amd/pci-rn.c | 3 sound/soc/sof/core.c | 4 sound/soc/sof/intel/hda-dai-ops.c | 13 sound/soc/sof/intel/hda-dai.c | 8 sound/soc/sof/intel/hda.c | 12 sound/soc/sof/intel/hda.h | 2 sound/usb/quirks-table.h | 29 ++ tools/objtool/arch/x86/decode.c | 11 tools/objtool/check.c | 45 ++- tools/objtool/include/objtool/arch.h | 1 tools/objtool/include/objtool/elf.h | 1 tools/perf/util/machine.c | 5 tools/perf/util/thread-stack.c | 4 tools/testing/selftests/net/forwarding/mirror_gre_changes.sh | 3 262 files changed, 2085 insertions(+), 971 deletions(-)
Abel Vesa (1): regulator: qcom-rpmh: Fix LDO 12 regulator for PM8550
Abel Wu (1): sock: Fix misuse of sk_under_memory_pressure()
Alex Deucher (1): Revert "Revert "drm/amdgpu/display: change pipe policy for DCN 2.0""
Alexander Stein (1): arm64: dts: imx93: Fix anatop node size
Alexandre Ghiti (1): riscv: uaccess: Return the number of bytes effectively not copied
Alfred Lee (1): net: dsa: mv88e6xxx: Wait for EEPROM done before HW reset
Alvin Lee (2):
      drm/amd/display: Update DTBCLK for DCN32
      drm/amd/display: Apply 60us prefetch for DCFCLK <= 300Mhz
Anand Jain (1): btrfs: fix replace/scrub failure with metadata_uuid
Andrej Picej (1): ARM: dts: imx6: phytec: fix RTC interrupt level
Andrey Konovalov (1): media: camss: set VFE bpl_alignment to 16 for sdm845 and sm8250
Andrii Staikov (1): i40e: fix misleading debug logs
Andy Shevchenko (1): Bluetooth: MGMT: Use correct address for memcpy()
Armin Wolf (1): pcmcia: rsrc_nonstatic: Fix memory leak in nonstatic_release_resource_db()
Arnaldo Carvalho de Melo (1): Revert "perf report: Append inlines to non-DWARF callchains"
Arnd Bergmann (1): ASoC: amd: vangogh: select CONFIG_SND_AMD_ACP_CONFIG
Aurabindo Pillai (1): Revert "drm/amd/display: disable SubVP + DRR to prevent underflow"
Avichal Rakesh (1): usb: gadget: uvc: queue empty isoc requests if no video buffer is available
Bard Liao (3):
      ASoC: Intel: sof_sdw: add quirk for MTL RVP
      ASoC: Intel: sof_sdw_rt_sdca_jack_common: test SOF_JACK_JDSRC in _exit
      ASoC: max98363: don't return on success reading revision ID
Benjamin Gray (1): powerpc/kasan: Disable KCOV in KASAN code
Borislav Petkov (AMD) (4):
      x86/srso: Explain the untraining sequences a bit more
      x86/CPU/AMD: Fix the DIV(0) initial fix attempt
      x86/srso: Disable the mitigation on unaffected configurations
      x86/srso: Correct the mitigation status when SMT is disabled
Celeste Liu (1): riscv: entry: set a0 = -ENOSYS only when syscall != -1
Chen Lin (1): ring-buffer: Do not swap cpu_buffer during resize process
Chen-Yu Tsai (1): media: mtk-jpeg: Set platform driver data earlier
Chengfeng Ye (1): i2c: bcm-iproc: Fix bcm_iproc_i2c_isr deadlock issue
Chris Mason (1): btrfs: only subtract from len_to_oe_boundary when it is tracking an extent
Christophe JAILLET (1): net: phy: at803x: Use devm_regulator_get_enable_optional()
Christopher Obbard (2):
      arm64: dts: rockchip: Disable HS400 for eMMC on ROCK Pi 4
      arm64: dts: rockchip: Disable HS400 for eMMC on ROCK 4C+
Dan Carpenter (1): accel/qaic: Clean up integer overflow checking in map_user_pages()
Daniel Miess (3):
      drm/amd/display: Remove v_startup workaround for dcn3+
      drm/amd/display: Enable dcn314 DPP RCO
      drm/amd/display: disable RCO for DCN314
Danilo Krummrich (1): drm/scheduler: set entity to NULL in drm_sched_entity_pop_job()
David Howells (1): crypto, cifs: fix error handling in extract_iter_to_sg()
Dmitry Baryshkov (1): arm64: dts: qcom: qrb5165-rb5: fix thermal zone conflict
Dragos Tatulea (3):
      vdpa/mlx5: Fix mr->initialized semantics
      vdpa: Enable strict validation for netlinks ops
      net/mlx5e: XDP, Fix fifo overrun on XDP_REDIRECT
Edward Cree (3):
      sfc: add fallback action-set-lists for TC offload
      sfc: don't unregister flow_indr if it was never registered
      sfc: don't fail probe if MAE/TC setup fails
Edward Lo (2):
      fs/ntfs3: Enhance sanity check while generating attr_list
      fs/ntfs3: Return error for inconsistent extended attributes
Eric Dumazet (1): net: do not allow gso_size to be set to GSO_BY_FRAGS
Eugenio Pérez (1): vdpa/mlx5: Delete control vq iotlb in destroy_mr only when necessary
Even Xu (1): HID: intel-ish-hid: ipc: Add Arrow Lake PCI device ID
Fabio Estevam (1): arm64: dts: imx8mm: Drop CSI1 PHY reference clock configuration
Fei Shao (2):
      dt-bindings: input: goodix: Add "goodix,no-reset-during-suspend" property
      HID: i2c-hid: goodix: Add support for "goodix,no-reset-during-suspend" property
Filipe Manana (2):
      btrfs: fix use-after-free of new block group that became unused
      btrfs: fix infinite directory reads
Florian Westphal (3):
      netfilter: nf_tables: fix false-positive lockdep splat
      netfilter: nf_tables: deactivate catchall elements in next generation
      netfilter: nf_tables: don't fail inserts if duplicate has expired
Gal Pressman (1): virtio-vdpa: Fix cpumask memory leak in virtio_vdpa_find_vqs()
Geert Uytterhoeven (1): iopoll: Call cpu_relax() in busy loops
Greg Kroah-Hartman (1): Linux 6.4.12
Guchun Chen (1): drm/amdgpu: keep irq count in amdgpu_irq_disable_all
Hans Verkuil (1): media: platform: mediatek: vpu: fix NULL ptr dereference
Harshit Mogalapalli (1): mmc: sunplus: Fix error handling in spmmc_drv_probe()
Hawkins Jiawei (1): virtio-net: Zero max_tx_vq field for VIRTIO_NET_CTRL_MQ_HASH_CONFIG case
Helge Deller (1): parisc: Fix CONFIG_TLB_PTLOCK to work with lightweight spinlock checks
Herbert Xu (1): xfrm: Silence warnings triggerable by bad packets
Jack Xiao (1): drm/amdgpu: fix memory leak in mes self test
Jakub Kicinski (1): net: openvswitch: reject negative ifindex
Jani Nikula (2):
      Revert "drm/edid: Fix csync detailed mode parsing"
      drm/i915/sdvo: fix panel_type initialization
Jason Wang (1): virtio-net: set queues after driver_ok
Jason Xing (1): net: fix the RTO timer retransmitting skb every 1ms if linear option is enabled
Jerome Brunet (1): ASoC: meson: axg-tdm-formatter: fix channel slot allocation
Jia-Ju Bai (1): fs: ntfs3: Fix possible null-pointer dereferences in mi_read()
Jiasheng Jiang (1): soc: aspeed: socinfo: Add kfree for kstrdup
Josef Bacik (1): btrfs: fix incorrect splitting in btrfs_drop_extent_map_range
Justin Chen (1): net: phy: broadcom: stub c45 read/write for 54810
Kailang Yang (1): ALSA: hda/realtek - Remodified 3k pull low procedure
Karol Herbst (1): drm/nouveau/disp: fix use-after-free in error handling of nouveau_connector_create
Kashyap Desai (1): RDMA/bnxt_re: consider timeout of destroy ah as success.
Kathiravan T (1): arm64: dts: qcom: ipq5332: add QFPROM node
Konstantin Komarov (2):
      fs/ntfs3: Mark ntfs dirty when on-disk struct is corrupted
      fs/ntfs3: Alternative boot if primary boot is corrupted
Kuniyuki Iwashima (1): af_unix: Fix null-ptr-deref in unix_stream_sendpage().
Lang Yu (2):
      drm/amdgpu: install stub fence into potential unused fence pointers
      drm/amdgpu: unmap and remove csa_va properly
Laurent Pinchart (1): media: uvcvideo: Fix menu count handling for userspace XU mappings
Leon Romanovsky (2):
      xfrm: delete offloaded policy
      xfrm: don't skip free of empty state in acquire policy
Li Yang (1): net: phy: at803x: fix the wol setting functions
Liang Chen (1): net: veth: Page pool creation error handling for existing pools only
Lin Ma (8):
      vdpa: Add features attr to vdpa_nl_policy for nlattr length check
      vdpa: Add queue index attr to vdpa_nl_policy for nlattr length check
      vdpa: Add max vqp attr to vdpa_nl_policy for nlattr length check
      net: xfrm: Fix xfrm_address_filter OOB read
      net: af_key: fix sadb_x_filter validation
      net: xfrm: Amend XFRMA_SEC_CTX nla_policy structure
      xfrm: add NULL check in xfrm_update_ae_params
      xfrm: add forgotten nla_policy for XFRMA_MTIMER_THRESH
Long Li (1): RDMA/mana_ib: Use v2 version of cfg_rx_steer_req to enable RX coalescing
Longlong Yao (1): drm/amdgpu: fix calltrace warning in amddrm_buddy_fini
Lu Hongfei (1): led: qcom-lpg: Fix resource leaks in for_each_available_child_of_node() loops
Luca Ceresoli (1): drm/panel: simple: Fix AUO G121EAN01 panel timings according to the docs
Luke D. Jones (5):
      ALSA: hda/realtek: Add quirk for ASUS ROG GX650P
      ALSA: hda/realtek: Add quirk for ASUS ROG GA402X
      ALSA: hda/realtek: Amend G634 quirk to enable rear speakers
      ALSA: hda/realtek: Add quirk for ASUS ROG G614Jx
      ALSA: hda/realtek: Add quirk for ASUS ROG GZ301V
Manish Chopra (1): qede: fix firmware halt over suspend and resume
Marcin Szycik (1): ice: Block switchdev mode when ADQ is active and vice versa
Mario Limonciello (1): drm/amd: flush any delayed gfxoff on suspend entry
Mark Brown (2):
      arm64/ptrace: Ensure that SME is set up for target when writing SSVE state
      arm64/ptrace: Ensure that the task sees ZT writes on first use
Martin Fuzzey (1): regulator: da9063: better fix null deref with partial DT
Masahiro Yamada (1): Revert "[PATCH] uml: export symbols added by GCC hardened"
Mathias Nyman (1): xhci: get rid of XHCI_PLAT quirk that used to prevent MSI setup
Matthew Anderson (1): Bluetooth: btusb: Add MT7922 bluetooth ID for the Asus Ally
Maxime Coquelin (1): vduse: Use proper spinlock for IRQ injection
Michal Schmidt (4):
      octeon_ep: fix timeout value for waiting on mbox response
      octeon_ep: cancel tx_timeout_task later in remove sequence
      octeon_ep: cancel ctrl_mbox_task after intr_poll_task
      octeon_ep: cancel queued works in probe error path
Mika Westerberg (3):
      thunderbolt: Read retimer NVM authentication status prior tb_retimer_set_inbound_sbtx()
      thunderbolt: Add Intel Barlow Ridge PCI ID
      thunderbolt: Limit Intel Barlow Ridge USB3 bandwidth
Ming Lei (1): blk-cgroup: hold queue_lock when removing blkg->q_node
Moti Haimovski (1): accel/habanalabs: fix mem leak in capture user mappings
Nam Cao (1): riscv: correct riscv_insn_is_c_jr() and riscv_insn_is_c_jalr()
Naohiro Aota (1): btrfs: move out now unused BG from the reclaim list
Nathan Lynch (1): powerpc/rtas_flash: allow user copy to flash block cache objects
Nicholas Kazlauskas (1): drm/amd/display: Skip DPP DTO update if root clock is gated
Ninad Naik (1): pinctrl: qcom: Add intr_target_width field to support increased number of interrupt targets
Ofir Bitton (1): accel/habanalabs: add pci health check during heartbeat
Oleksij Rempel (1): ARM: dts: imx6dl: prtrvt, prtvt7, prti6q, prtwd2: fix USB related warnings
Pablo Neira Ayuso (3):
      netfilter: nf_tables: fix GC transaction races with netns and netlink event exit path
      netfilter: nf_tables: GC transaction race with netns dismantle
      netfilter: nft_dynset: disallow object maps
Parker Newman (1): i2c: tegra: Fix i2c-tegra DMA config option processing
Patrisious Haddad (1): RDMA/mlx5: Return the firmware result upon destroying QP/RQ
Paulo Alcantara (1): smb: client: fix warning in cifs_smb3_do_mount()
Peter Ujfalusi (2):
      ASoC: Intel: sof_sdw: add quirk for LNL RVP
      ASoC: SOF: core: Free the firmware trace before calling snd_sof_shutdown()
Peter Zijlstra (11):
      x86/cpu: Fix __x86_return_thunk symbol type
      x86/cpu: Fix up srso_safe_ret() and __x86_return_thunk()
      objtool/x86: Fix SRSO mess
      x86/alternative: Make custom return thunk unconditional
      x86/cpu: Clean up SRSO return thunk mess
      x86/cpu: Rename original retbleed methods
      x86/cpu: Rename srso_(.*)_alias to srso_alias_\1
      x86/cpu: Cleanup the untrain mess
      x86/cpu/kvm: Provide UNTRAIN_RET_VM
      objtool/x86: Fixup frame-pointer vs rethunk
      x86/static_call: Fix __static_call_fixup()
Petr Machata (1): selftests: mirror_gre_changes: Tighten up the TTL test match
Petr Pavlu (2):
      x86/retpoline,kprobes: Fix position of thunk sections with CONFIG_LTO_CLANG
      x86/retpoline,kprobes: Skip optprobe check for indirect jumps with retpolines and IBT
Pierre-Louis Bossart (2):
      ASoC: Intel: sof_sdw: add quick for Dell SKU 0BDA
      ASoC: SOF: Intel: fix SoundWire/HDaudio mutual exclusion
Piotr Gardocki (1): iavf: fix FDIR rule fields masks validation
Pranjal Ramajor Asha Kanojiya (1): accel/qaic: Fix slicing memory leak
Prashanth K (1): usb: gadget: u_serial: Avoid spinlock recursion in __gs_console_push
Qingsong Chen (1): rust: macros: vtable: fix `HAS_*` redefinition (`gen_const_name`)
Quan Nguyen (1): i2c: designware: Correct length byte validation logic
Radhey Shyam Pandey (1): net: macb: In ZynqMP resume always configure PS GTR for non-wakeup source
Ranjani Sridharan (1): ASoC: SOF: intel: hda: Clean up link DMA for IPC3 during stop
Raphael Gallais-Pou (1): drm/stm: ltdc: fix late dereference check
Russell Harmon via samba-technical (1): cifs: Release folio lock on fscache read hit.
Russell King (Oracle) (1): net: phy: fix IRQ-based wake-on-lan over hibernate / power off
Scott Mayhew (1): smb: client: fix null auth
Sean Christopherson (1): x86/retpoline: Don't clobber RFLAGS during srso_safe_ret()
Shazad Hussain (1): dt-bindings: pinctrl: qcom,sa8775p-tlmm: add gpio function constant
Sherry Sun (1): tty: serial: fsl_lpuart: Clear the error flags by writing 1 for lpuart32 platforms
Simon Trimmer (1): ASoC: cs35l56: Move DSP part string generation so that it is done only once
Sishuai Gong (1): ipvs: fix racy memcpy in proc_do_sync_threshold
Stefan Binding (1): ALSA: hda/realtek: Add quirks for HP G11 Laptops
Stefan Wahren (1): ARM: dts: imx: Adjust dma-apbh node name
Steve French (2):
      cifs: fix potential oops in cifs_oplock_break
      smb3: display network namespace in debug information
Sumit Gupta (1): PCI: tegra194: Fix possible array out of bounds access
Suravee Suthikulpanit (1): iommu/amd: Introduce Disable IRTE Caching Support
Sweet Tea Dorminy (1): blk-crypto: dynamically allocate fallback profile
Tam Nguyen (1): i2c: designware: Handle invalid SMBus block data response length value
Tim Huang (2):
      drm/amdgpu: skip fence GFX interrupts disable/enable for S0ix
      drm/amd/pm: skip the RLC stop when S0i3 suspend for SMU v13.0.4/11
Tony Lindgren (2):
      bus: ti-sysc: Flush posted write on enable before reset
      serial: 8250: Fix oops for port->pm on uart_change_pm()
Tuo Li (2):
      gfs2: Fix possible data races in gfs2_show_options()
      ALSA: hda: fix a possible null-pointer dereference due to data race in snd_hdac_regmap_sync()
Uday M Bhat (1): ASoC: Intel: sof_sdw: Add support for Rex soundwire
Umio Yasuno (1): drm/amdgpu/pm: fix throttle_status for other than MP1 11.0.7
Uwe Kleine-König (1): serial: stm32: Ignore return value of uart_remove_one_port() in .remove()
Venkata Prasad Potturu (2):
      ASoC: SOF: amd: Add pci revision id check
      ASoC: amd: vangogh: Add check for acp config flags in vangogh platform
Vinay Belgaumkar (1): drm/i915/guc/slpc: Restore efficient freq earlier
Wander Lairson Costa (1): drm/qxl: fix UAF on handle creation
Wei Chen (1): mmc: sunplus: fix return value check of mmc_add_host()
Winston Wen (1): cifs: fix session state check in reconnect to avoid use-after-free issue
Wolfram Sang (2):
      drm: rcar-du: remove R-Car H3 ES1.* workarounds
      virtio-mmio: don't break lifecycle of vm_dev
Xiang Yang (1): net: pcs: Add missing put_device call in miic_create
Xiaolei Wang (2):
      ARM: dts: imx: Set default tuning step for imx7d usdhc
      ARM: dts: imx: Set default tuning step for imx6sx usdhc
Xin Long (1): netfilter: set default timeout to 3 secs for sctp shutdown send and recv state
Xiubo Li (1): ceph: try to dump the msgs when decoding fails
Xu Yang (3):
      usb: chipidea: imx: don't request QoS for imx8ulp
      usb: chipidea: imx: turn off vbus comparator when suspend
      usb: chipidea: imx: add missing USB PHY DPDM wakeup setting
Yang Yingliang (1): mmc: wbsd: fix double mmc_free_host() in wbsd_init()
Yangtao Li (1): mmc: f-sdh30: fix order of function calls in sdhci_f_sdh30_remove
Yi Yang (1): tty: n_gsm: fix the UAF caused by race condition in gsm_cleanup_mux
Yibin Ding (1): mmc: block: Fix in_flight[issue_type] value error
Yicong Yang (1): i2c: hisi: Only handle the interrupt of the driver's transfer
Yogesh Hegde (1): arm64: dts: rockchip: Fix Wifi/Bluetooth on ROCK Pi 4 boards
Yuanjun Gong (1): fbdev: mmp: fix value check in mmphw_probe()
Yuechao Zhao (1): watchdog: sp5100_tco: support Hygon FCH/SCH (Server Controller Hub)
Yunfei Dong (1): media: v4l2-mem2mem: add lock to protect parameter num_rdy
Zev Weiss (1): soc: aspeed: uart-routing: Use __sysfs_match_string
Zhang Shurong (2):
      firewire: net: fix use after free in fwnet_finish_incoming_packet()
      ASoC: rt5665: add missed regulator_bulk_disable
Zhengchao Shao (3):
      xfrm: fix slab-use-after-free in decode_session6
      ip6_vti: fix slab-use-after-free in decode_session6
      ip_vti: fix potential slab-use-after-free in decode_session6
Zhengping Jiang (1): Bluetooth: L2CAP: Fix use-after-free
Ziyang Xuan (1): team: Fix incorrect deletion of ETH_P_8021AD protocol vid from slaves
dengxiang (2):
      ALSA: hda/realtek: Add quirks for Unis H3C Desktop B760 & Q760
      ALSA: usb-audio: Add support for Mythware XA001AU capture and playback interfaces.
gaoxu (1): dma-remap: use kvmalloc_array/kvfree for larger dma memory remap
hackyzh002 (1): drm/amdgpu: Fix integer overflow in amdgpu_cs_pass1
shanzhulig (1): drm/amdgpu: Fix potential fence use-after-free v2
stuarthayhurst (1): HID: logitech-hidpp: Add USB and Bluetooth IDs for the Logitech G915 TKL Keyboard
xiaoshoukui (1): btrfs: fix BUG_ON condition in btrfs_cancel_balance
diff --git a/Documentation/admin-guide/hw-vuln/srso.rst b/Documentation/admin-guide/hw-vuln/srso.rst index 2f923c805802..f79cb11b080f 100644 --- a/Documentation/admin-guide/hw-vuln/srso.rst +++ b/Documentation/admin-guide/hw-vuln/srso.rst @@ -124,8 +124,8 @@ sequence. To ensure the safety of this mitigation, the kernel must ensure that the safe return sequence is itself free from attacker interference. In Zen3 and Zen4, this is accomplished by creating a BTB alias between the -untraining function srso_untrain_ret_alias() and the safe return -function srso_safe_ret_alias() which results in evicting a potentially +untraining function srso_alias_untrain_ret() and the safe return +function srso_alias_safe_ret() which results in evicting a potentially poisoned BTB entry and using that safe one for all function returns.
In older Zen1 and Zen2, this is accomplished using a reinterpretation diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index a8fc0eb6fb1d..732391193182 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -323,6 +323,7 @@ option with care. pgtbl_v1 - Use v1 page table for DMA-API (Default). pgtbl_v2 - Use v2 page table for DMA-API. + irtcachedis - Disable Interrupt Remapping Table (IRT) caching.
amd_iommu_dump= [HW,X86-64] Enable AMD IOMMU driver option to dump the ACPI table diff --git a/Documentation/devicetree/bindings/input/goodix,gt7375p.yaml b/Documentation/devicetree/bindings/input/goodix,gt7375p.yaml index ce18d7dadae2..1edad1da1196 100644 --- a/Documentation/devicetree/bindings/input/goodix,gt7375p.yaml +++ b/Documentation/devicetree/bindings/input/goodix,gt7375p.yaml @@ -43,6 +43,15 @@ properties: itself as long as it allows the main board to make signals compatible with what the touchscreen is expecting for its IO rails.
+ goodix,no-reset-during-suspend: + description: + Set this to true to enforce the driver to not assert the reset GPIO + during suspend. + Due to potential touchscreen hardware flaw, back-powering could happen in + suspend if the power supply is on and with active-low reset GPIO asserted. + This property is used to avoid the back-powering issue. + type: boolean + required: - compatible - reg diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sa8775p-tlmm.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sa8775p-tlmm.yaml index e608a4f1bcae..e119a226a4b1 100644 --- a/Documentation/devicetree/bindings/pinctrl/qcom,sa8775p-tlmm.yaml +++ b/Documentation/devicetree/bindings/pinctrl/qcom,sa8775p-tlmm.yaml @@ -87,7 +87,7 @@ $defs: emac0_mdc, emac0_mdio, emac0_ptp_aux, emac0_ptp_pps, emac1_mcg0, emac1_mcg1, emac1_mcg2, emac1_mcg3, emac1_mdc, emac1_mdio, emac1_ptp_aux, emac1_ptp_pps, gcc_gp1, gcc_gp2, gcc_gp3, - gcc_gp4, gcc_gp5, hs0_mi2s, hs1_mi2s, hs2_mi2s, ibi_i3c, + gcc_gp4, gcc_gp5, gpio, hs0_mi2s, hs1_mi2s, hs2_mi2s, ibi_i3c, jitter_bist, mdp0_vsync0, mdp0_vsync1, mdp0_vsync2, mdp0_vsync3, mdp0_vsync4, mdp0_vsync5, mdp0_vsync6, mdp0_vsync7, mdp0_vsync8, mdp1_vsync0, mdp1_vsync1, mdp1_vsync2, mdp1_vsync3, mdp1_vsync4, diff --git a/Documentation/networking/nf_conntrack-sysctl.rst b/Documentation/networking/nf_conntrack-sysctl.rst index 8b1045c3b59e..c383a394c665 100644 --- a/Documentation/networking/nf_conntrack-sysctl.rst +++ b/Documentation/networking/nf_conntrack-sysctl.rst @@ -178,10 +178,10 @@ nf_conntrack_sctp_timeout_established - INTEGER (seconds) Default is set to (hb_interval * path_max_retrans + rto_max)
nf_conntrack_sctp_timeout_shutdown_sent - INTEGER (seconds) - default 0.3 + default 3
nf_conntrack_sctp_timeout_shutdown_recd - INTEGER (seconds) - default 0.3 + default 3
nf_conntrack_sctp_timeout_shutdown_ack_sent - INTEGER (seconds) default 3 diff --git a/Makefile b/Makefile index d0efd84bb7d0..0ff13b943f99 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 6 PATCHLEVEL = 4 -SUBLEVEL = 11 +SUBLEVEL = 12 EXTRAVERSION = NAME = Hurr durr I'ma ninja sloth
diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi index d19508c8f9ed..a3668a0827fc 100644 --- a/arch/arm/boot/dts/imx23.dtsi +++ b/arch/arm/boot/dts/imx23.dtsi @@ -59,7 +59,7 @@ icoll: interrupt-controller@80000000 { reg = <0x80000000 0x2000>; };
- dma_apbh: dma-apbh@80004000 { + dma_apbh: dma-controller@80004000 { compatible = "fsl,imx23-dma-apbh"; reg = <0x80004000 0x2000>; interrupts = <0 14 20 0 diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi index a8d3c3113e0f..29e37b1fae66 100644 --- a/arch/arm/boot/dts/imx28.dtsi +++ b/arch/arm/boot/dts/imx28.dtsi @@ -78,7 +78,7 @@ hsadc: hsadc@80002000 { status = "disabled"; };
- dma_apbh: dma-apbh@80004000 { + dma_apbh: dma-controller@80004000 { compatible = "fsl,imx28-dma-apbh"; reg = <0x80004000 0x2000>; interrupts = <82 83 84 85 diff --git a/arch/arm/boot/dts/imx6dl-prtrvt.dts b/arch/arm/boot/dts/imx6dl-prtrvt.dts index 56bb1ca56a2d..36b031236e47 100644 --- a/arch/arm/boot/dts/imx6dl-prtrvt.dts +++ b/arch/arm/boot/dts/imx6dl-prtrvt.dts @@ -124,6 +124,10 @@ &usbh1 { status = "disabled"; };
+&usbotg { + disable-over-current; +}; + &vpu { status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx6qdl-phytec-mira.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-mira.dtsi index 1a599c294ab8..1ca4d219609f 100644 --- a/arch/arm/boot/dts/imx6qdl-phytec-mira.dtsi +++ b/arch/arm/boot/dts/imx6qdl-phytec-mira.dtsi @@ -182,7 +182,7 @@ i2c_rtc: rtc@68 { pinctrl-0 = <&pinctrl_rtc_int>; reg = <0x68>; interrupt-parent = <&gpio7>; - interrupts = <8 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <8 IRQ_TYPE_LEVEL_LOW>; status = "disabled"; }; }; diff --git a/arch/arm/boot/dts/imx6qdl-prti6q.dtsi b/arch/arm/boot/dts/imx6qdl-prti6q.dtsi index f0db0d4471f4..36f84f4da6b0 100644 --- a/arch/arm/boot/dts/imx6qdl-prti6q.dtsi +++ b/arch/arm/boot/dts/imx6qdl-prti6q.dtsi @@ -69,6 +69,7 @@ &usbh1 { vbus-supply = <®_usb_h1_vbus>; phy_type = "utmi"; dr_mode = "host"; + disable-over-current; status = "okay"; };
@@ -78,10 +79,18 @@ &usbotg { pinctrl-0 = <&pinctrl_usbotg>; phy_type = "utmi"; dr_mode = "host"; - disable-over-current; + over-current-active-low; status = "okay"; };
+&usbphynop1 { + status = "disabled"; +}; + +&usbphynop2 { + status = "disabled"; +}; + &usdhc1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc1>; diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi index b72ec745f6d1..bda182edc589 100644 --- a/arch/arm/boot/dts/imx6qdl.dtsi +++ b/arch/arm/boot/dts/imx6qdl.dtsi @@ -150,7 +150,7 @@ soc: soc { interrupt-parent = <&gpc>; ranges;
- dma_apbh: dma-apbh@110000 { + dma_apbh: dma-controller@110000 { compatible = "fsl,imx6q-dma-apbh", "fsl,imx28-dma-apbh"; reg = <0x00110000 0x2000>; interrupts = <0 13 IRQ_TYPE_LEVEL_HIGH>, diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi index 93ac2380ca1e..fc0654e3fe95 100644 --- a/arch/arm/boot/dts/imx6sx.dtsi +++ b/arch/arm/boot/dts/imx6sx.dtsi @@ -209,7 +209,7 @@ gpu: gpu@1800000 { power-domains = <&pd_pu>; };
- dma_apbh: dma-apbh@1804000 { + dma_apbh: dma-controller@1804000 { compatible = "fsl,imx6sx-dma-apbh", "fsl,imx28-dma-apbh"; reg = <0x01804000 0x2000>; interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>, @@ -980,6 +980,8 @@ usdhc1: mmc@2190000 { <&clks IMX6SX_CLK_USDHC1>; clock-names = "ipg", "ahb", "per"; bus-width = <4>; + fsl,tuning-start-tap = <20>; + fsl,tuning-step= <2>; status = "disabled"; };
@@ -992,6 +994,8 @@ usdhc2: mmc@2194000 { <&clks IMX6SX_CLK_USDHC2>; clock-names = "ipg", "ahb", "per"; bus-width = <4>; + fsl,tuning-start-tap = <20>; + fsl,tuning-step= <2>; status = "disabled"; };
@@ -1004,6 +1008,8 @@ usdhc3: mmc@2198000 { <&clks IMX6SX_CLK_USDHC3>; clock-names = "ipg", "ahb", "per"; bus-width = <4>; + fsl,tuning-start-tap = <20>; + fsl,tuning-step= <2>; status = "disabled"; };
diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi index 3d9d0f823568..118764c50d92 100644 --- a/arch/arm/boot/dts/imx6ul.dtsi +++ b/arch/arm/boot/dts/imx6ul.dtsi @@ -164,7 +164,7 @@ intc: interrupt-controller@a01000 { <0x00a06000 0x2000>; };
- dma_apbh: dma-apbh@1804000 { + dma_apbh: dma-controller@1804000 { compatible = "fsl,imx6q-dma-apbh", "fsl,imx28-dma-apbh"; reg = <0x01804000 0x2000>; interrupts = <0 13 IRQ_TYPE_LEVEL_HIGH>, diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi index efe2525b62fa..6ffb428dc939 100644 --- a/arch/arm/boot/dts/imx7s.dtsi +++ b/arch/arm/boot/dts/imx7s.dtsi @@ -1184,6 +1184,8 @@ usdhc1: mmc@30b40000 { <&clks IMX7D_USDHC1_ROOT_CLK>; clock-names = "ipg", "ahb", "per"; bus-width = <4>; + fsl,tuning-step = <2>; + fsl,tuning-start-tap = <20>; status = "disabled"; };
@@ -1196,6 +1198,8 @@ usdhc2: mmc@30b50000 { <&clks IMX7D_USDHC2_ROOT_CLK>; clock-names = "ipg", "ahb", "per"; bus-width = <4>; + fsl,tuning-step = <2>; + fsl,tuning-start-tap = <20>; status = "disabled"; };
@@ -1208,6 +1212,8 @@ usdhc3: mmc@30b60000 { <&clks IMX7D_USDHC3_ROOT_CLK>; clock-names = "ipg", "ahb", "per"; bus-width = <4>; + fsl,tuning-step = <2>; + fsl,tuning-start-tap = <20>; status = "disabled"; };
@@ -1257,7 +1263,7 @@ fec1: ethernet@30be0000 { }; };
- dma_apbh: dma-apbh@33000000 { + dma_apbh: dma-controller@33000000 { compatible = "fsl,imx7d-dma-apbh", "fsl,imx28-dma-apbh"; reg = <0x33000000 0x2000>; interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>, diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi index d6b36f04f3dc..1a647d4072ba 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi @@ -1221,10 +1221,9 @@ mipi_csi: mipi-csi@32e30000 { compatible = "fsl,imx8mm-mipi-csi2"; reg = <0x32e30000 0x1000>; interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>; - assigned-clocks = <&clk IMX8MM_CLK_CSI1_CORE>, - <&clk IMX8MM_CLK_CSI1_PHY_REF>; - assigned-clock-parents = <&clk IMX8MM_SYS_PLL2_1000M>, - <&clk IMX8MM_SYS_PLL2_1000M>; + assigned-clocks = <&clk IMX8MM_CLK_CSI1_CORE>; + assigned-clock-parents = <&clk IMX8MM_SYS_PLL2_1000M>; + clock-frequency = <333000000>; clocks = <&clk IMX8MM_CLK_DISP_APB_ROOT>, <&clk IMX8MM_CLK_CSI1_ROOT>, diff --git a/arch/arm64/boot/dts/freescale/imx93.dtsi b/arch/arm64/boot/dts/freescale/imx93.dtsi index e8d49660ac85..c0f49fedaf9e 100644 --- a/arch/arm64/boot/dts/freescale/imx93.dtsi +++ b/arch/arm64/boot/dts/freescale/imx93.dtsi @@ -306,7 +306,7 @@ mlmix: power-domain@44461800 {
anatop: anatop@44480000 { compatible = "fsl,imx93-anatop", "syscon"; - reg = <0x44480000 0x10000>; + reg = <0x44480000 0x2000>; };
adc1: adc@44530000 { diff --git a/arch/arm64/boot/dts/qcom/ipq5332.dtsi b/arch/arm64/boot/dts/qcom/ipq5332.dtsi index af4d97143bcf..c2d6cc65a323 100644 --- a/arch/arm64/boot/dts/qcom/ipq5332.dtsi +++ b/arch/arm64/boot/dts/qcom/ipq5332.dtsi @@ -135,6 +135,13 @@ soc@0 { #size-cells = <1>; ranges = <0 0 0 0xffffffff>;
+ qfprom: efuse@a4000 { + compatible = "qcom,ipq5332-qfprom", "qcom,qfprom"; + reg = <0x000a4000 0x721>; + #address-cells = <1>; + #size-cells = <1>; + }; + rng: rng@e3000 { compatible = "qcom,prng-ee"; reg = <0x000e3000 0x1000>; diff --git a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts index dd924331b0ee..ec066a89436a 100644 --- a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts +++ b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts @@ -121,7 +121,7 @@ active-config0 { }; };
- pm8150l-thermal { + pm8150l-pcb-thermal { polling-delay-passive = <0>; polling-delay = <0>; thermal-sensors = <&pm8150l_adc_tm 1>; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts index 028eb508ae30..8bfd5f88d1ef 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts @@ -548,9 +548,8 @@ &saradc { &sdhci { max-frequency = <150000000>; bus-width = <8>; - mmc-hs400-1_8v; + mmc-hs200-1_8v; non-removable; - mmc-hs400-enhanced-strobe; status = "okay"; };
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi index 907071d4fe80..980c4534313a 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi @@ -45,7 +45,7 @@ led-0 { sdio_pwrseq: sdio-pwrseq { compatible = "mmc-pwrseq-simple"; clocks = <&rk808 1>; - clock-names = "ext_clock"; + clock-names = "lpo"; pinctrl-names = "default"; pinctrl-0 = <&wifi_enable_h>; reset-gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>; @@ -645,9 +645,9 @@ &saradc { };
&sdhci { + max-frequency = <150000000>; bus-width = <8>; - mmc-hs400-1_8v; - mmc-hs400-enhanced-strobe; + mmc-hs200-1_8v; non-removable; status = "okay"; }; diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 67f2fb781f59..8df46f186c64 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -356,7 +356,7 @@ static inline int sme_max_virtualisable_vl(void) return vec_max_virtualisable_vl(ARM64_VEC_SME); }
-extern void sme_alloc(struct task_struct *task); +extern void sme_alloc(struct task_struct *task, bool flush); extern unsigned int sme_get_vl(void); extern int sme_set_current_vl(unsigned long arg); extern int sme_get_current_vl(void); @@ -388,7 +388,7 @@ static inline void sme_smstart_sm(void) { } static inline void sme_smstop_sm(void) { } static inline void sme_smstop(void) { }
-static inline void sme_alloc(struct task_struct *task) { } +static inline void sme_alloc(struct task_struct *task, bool flush) { } static inline void sme_setup(void) { } static inline unsigned int sme_get_vl(void) { return 0; } static inline int sme_max_vl(void) { return 0; } diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 75c37b1c55aa..087c05aa960e 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -1285,9 +1285,9 @@ void fpsimd_release_task(struct task_struct *dead_task) * the interest of testability and predictability, the architecture * guarantees that when ZA is enabled it will be zeroed. */ -void sme_alloc(struct task_struct *task) +void sme_alloc(struct task_struct *task, bool flush) { - if (task->thread.sme_state) { + if (task->thread.sme_state && flush) { memset(task->thread.sme_state, 0, sme_state_size(task)); return; } @@ -1515,7 +1515,7 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs) }
sve_alloc(current, false); - sme_alloc(current); + sme_alloc(current, true); if (!current->thread.sve_state || !current->thread.sme_state) { force_sig(SIGKILL); return; diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 5b9b4305248b..187aa2b175b4 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -881,6 +881,13 @@ static int sve_set_common(struct task_struct *target, break; case ARM64_VEC_SME: target->thread.svcr |= SVCR_SM_MASK; + + /* + * Disable traps and ensure there is SME storage but + * preserve any currently set values in ZA/ZT. + */ + sme_alloc(target, false); + set_tsk_thread_flag(target, TIF_SME); break; default: WARN_ON_ONCE(1); @@ -1100,7 +1107,7 @@ static int za_set(struct task_struct *target, }
/* Allocate/reinit ZA storage */ - sme_alloc(target); + sme_alloc(target, true); if (!target->thread.sme_state) { ret = -ENOMEM; goto out; @@ -1170,8 +1177,13 @@ static int zt_set(struct task_struct *target, if (!system_supports_sme2()) return -EINVAL;
+ /* Ensure SVE storage in case this is first use of SME */ + sve_alloc(target, false); + if (!target->thread.sve_state) + return -ENOMEM; + if (!thread_za_enabled(&target->thread)) { - sme_alloc(target); + sme_alloc(target, true); if (!target->thread.sme_state) return -ENOMEM; } @@ -1179,8 +1191,10 @@ static int zt_set(struct task_struct *target, ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, thread_zt_state(&target->thread), 0, ZT_SIG_REG_BYTES); - if (ret == 0) + if (ret == 0) { target->thread.svcr |= SVCR_ZA_MASK; + set_tsk_thread_flag(target, TIF_SME); + }
fpsimd_flush_task_state(target);
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 10b407672c42..bcd1ebb21da6 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -474,7 +474,7 @@ static int restore_za_context(struct user_ctxs *user) fpsimd_flush_task_state(current); /* From now, fpsimd_thread_switch() won't touch thread.sve_state */
- sme_alloc(current); + sme_alloc(current, true); if (!current->thread.sme_state) { current->thread.svcr &= ~SVCR_ZA_MASK; clear_thread_flag(TIF_SME); diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index 0e5ebfe8d9d2..ae03b8679696 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -25,6 +25,7 @@ #include <asm/traps.h> #include <asm/thread_info.h> #include <asm/alternative.h> +#include <asm/spinlock_types.h>
#include <linux/linkage.h> #include <linux/pgtable.h> @@ -406,7 +407,7 @@ LDREG 0(\ptp),\pte bb,<,n \pte,_PAGE_PRESENT_BIT,3f b \fault - stw \spc,0(\tmp) + stw \tmp1,0(\tmp) 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) #endif 2: LDREG 0(\ptp),\pte @@ -415,24 +416,22 @@ .endm
/* Release page_table_lock without reloading lock address. - Note that the values in the register spc are limited to - NR_SPACE_IDS (262144). Thus, the stw instruction always - stores a nonzero value even when register spc is 64 bits. We use an ordered store to ensure all prior accesses are performed prior to releasing the lock. */ - .macro ptl_unlock0 spc,tmp + .macro ptl_unlock0 spc,tmp,tmp2 #ifdef CONFIG_TLB_PTLOCK -98: or,COND(=) %r0,\spc,%r0 - stw,ma \spc,0(\tmp) +98: ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmp2 + or,COND(=) %r0,\spc,%r0 + stw,ma \tmp2,0(\tmp) 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) #endif .endm
/* Release page_table_lock. */ - .macro ptl_unlock1 spc,tmp + .macro ptl_unlock1 spc,tmp,tmp2 #ifdef CONFIG_TLB_PTLOCK 98: get_ptl \tmp - ptl_unlock0 \spc,\tmp + ptl_unlock0 \spc,\tmp,\tmp2 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) #endif .endm @@ -1125,7 +1124,7 @@ dtlb_miss_20w: idtlbt pte,prot
- ptl_unlock1 spc,t0 + ptl_unlock1 spc,t0,t1 rfir nop
@@ -1151,7 +1150,7 @@ nadtlb_miss_20w:
idtlbt pte,prot
- ptl_unlock1 spc,t0 + ptl_unlock1 spc,t0,t1 rfir nop
@@ -1185,7 +1184,7 @@ dtlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
- ptl_unlock1 spc,t0 + ptl_unlock1 spc,t0,t1 rfir nop
@@ -1218,7 +1217,7 @@ nadtlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
- ptl_unlock1 spc,t0 + ptl_unlock1 spc,t0,t1 rfir nop
@@ -1247,7 +1246,7 @@ dtlb_miss_20:
idtlbt pte,prot
- ptl_unlock1 spc,t0 + ptl_unlock1 spc,t0,t1 rfir nop
@@ -1275,7 +1274,7 @@ nadtlb_miss_20: idtlbt pte,prot
- ptl_unlock1 spc,t0 + ptl_unlock1 spc,t0,t1 rfir nop
@@ -1320,7 +1319,7 @@ itlb_miss_20w: iitlbt pte,prot
- ptl_unlock1 spc,t0 + ptl_unlock1 spc,t0,t1 rfir nop
@@ -1344,7 +1343,7 @@ naitlb_miss_20w:
iitlbt pte,prot
- ptl_unlock1 spc,t0 + ptl_unlock1 spc,t0,t1 rfir nop
@@ -1378,7 +1377,7 @@ itlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
- ptl_unlock1 spc,t0 + ptl_unlock1 spc,t0,t1 rfir nop
@@ -1402,7 +1401,7 @@ naitlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
- ptl_unlock1 spc,t0 + ptl_unlock1 spc,t0,t1 rfir nop
@@ -1432,7 +1431,7 @@ itlb_miss_20:
iitlbt pte,prot
- ptl_unlock1 spc,t0 + ptl_unlock1 spc,t0,t1 rfir nop
@@ -1452,7 +1451,7 @@ naitlb_miss_20:
iitlbt pte,prot
- ptl_unlock1 spc,t0 + ptl_unlock1 spc,t0,t1 rfir nop
@@ -1482,7 +1481,7 @@ dbit_trap_20w: idtlbt pte,prot
- ptl_unlock0 spc,t0 + ptl_unlock0 spc,t0,t1 rfir nop #else @@ -1508,7 +1507,7 @@ dbit_trap_11:
mtsp t1, %sr1 /* Restore sr1 */
- ptl_unlock0 spc,t0 + ptl_unlock0 spc,t0,t1 rfir nop
@@ -1528,7 +1527,7 @@ dbit_trap_20: idtlbt pte,prot
- ptl_unlock0 spc,t0 + ptl_unlock0 spc,t0,t1 rfir nop #endif diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c index 4caf5e3079eb..359577ec1680 100644 --- a/arch/powerpc/kernel/rtas_flash.c +++ b/arch/powerpc/kernel/rtas_flash.c @@ -709,9 +709,9 @@ static int __init rtas_flash_init(void) if (!rtas_validate_flash_data.buf) return -ENOMEM;
- flash_block_cache = kmem_cache_create("rtas_flash_cache", - RTAS_BLK_SIZE, RTAS_BLK_SIZE, 0, - NULL); + flash_block_cache = kmem_cache_create_usercopy("rtas_flash_cache", + RTAS_BLK_SIZE, RTAS_BLK_SIZE, + 0, 0, RTAS_BLK_SIZE, NULL); if (!flash_block_cache) { printk(KERN_ERR "%s: failed to create block cache\n", __func__); diff --git a/arch/powerpc/mm/kasan/Makefile b/arch/powerpc/mm/kasan/Makefile index 699eeffd9f55..f9522fd70b2f 100644 --- a/arch/powerpc/mm/kasan/Makefile +++ b/arch/powerpc/mm/kasan/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0
KASAN_SANITIZE := n +KCOV_INSTRUMENT := n
obj-$(CONFIG_PPC32) += init_32.o obj-$(CONFIG_PPC_8xx) += 8xx.o diff --git a/arch/riscv/include/asm/insn.h b/arch/riscv/include/asm/insn.h index 8d5c84f2d5ef..603095c913e3 100644 --- a/arch/riscv/include/asm/insn.h +++ b/arch/riscv/include/asm/insn.h @@ -110,6 +110,7 @@ #define RVC_INSN_FUNCT4_OPOFF 12 #define RVC_INSN_FUNCT3_MASK GENMASK(15, 13) #define RVC_INSN_FUNCT3_OPOFF 13 +#define RVC_INSN_J_RS1_MASK GENMASK(11, 7) #define RVC_INSN_J_RS2_MASK GENMASK(6, 2) #define RVC_INSN_OPCODE_MASK GENMASK(1, 0) #define RVC_ENCODE_FUNCT3(f_) (RVC_FUNCT3_##f_ << RVC_INSN_FUNCT3_OPOFF) @@ -225,8 +226,6 @@ __RISCV_INSN_FUNCS(c_jal, RVC_MASK_C_JAL, RVC_MATCH_C_JAL) __RISCV_INSN_FUNCS(auipc, RVG_MASK_AUIPC, RVG_MATCH_AUIPC) __RISCV_INSN_FUNCS(jalr, RVG_MASK_JALR, RVG_MATCH_JALR) __RISCV_INSN_FUNCS(jal, RVG_MASK_JAL, RVG_MATCH_JAL) -__RISCV_INSN_FUNCS(c_jr, RVC_MASK_C_JR, RVC_MATCH_C_JR) -__RISCV_INSN_FUNCS(c_jalr, RVC_MASK_C_JALR, RVC_MATCH_C_JALR) __RISCV_INSN_FUNCS(c_j, RVC_MASK_C_J, RVC_MATCH_C_J) __RISCV_INSN_FUNCS(beq, RVG_MASK_BEQ, RVG_MATCH_BEQ) __RISCV_INSN_FUNCS(bne, RVG_MASK_BNE, RVG_MATCH_BNE) @@ -253,6 +252,18 @@ static __always_inline bool riscv_insn_is_branch(u32 code) return (code & RV_INSN_OPCODE_MASK) == RVG_OPCODE_BRANCH; }
+static __always_inline bool riscv_insn_is_c_jr(u32 code) +{ + return (code & RVC_MASK_C_JR) == RVC_MATCH_C_JR && + (code & RVC_INSN_J_RS1_MASK) != 0; +} + +static __always_inline bool riscv_insn_is_c_jalr(u32 code) +{ + return (code & RVC_MASK_C_JALR) == RVC_MATCH_C_JALR && + (code & RVC_INSN_J_RS1_MASK) != 0; +} + #define RV_IMM_SIGN(x) (-(((x) >> 31) & 1)) #define RVC_IMM_SIGN(x) (-(((x) >> 12) & 1)) #define RV_X(X, s, mask) (((X) >> (s)) & (mask)) diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index 8c258b78c925..bd19e885dcec 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -268,16 +268,16 @@ asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs) asmlinkage __visible __trap_section void do_trap_ecall_u(struct pt_regs *regs) { if (user_mode(regs)) { - ulong syscall = regs->a7; + long syscall = regs->a7;
regs->epc += 4; regs->orig_a0 = regs->a0;
syscall = syscall_enter_from_user_mode(regs, syscall);
- if (syscall < NR_syscalls) + if (syscall >= 0 && syscall < NR_syscalls) syscall_handler(regs, syscall); - else + else if (syscall != -1) regs->a0 = -ENOSYS;
syscall_exit_to_user_mode(regs); diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S index ec486e5369d9..09b47ebacf2e 100644 --- a/arch/riscv/lib/uaccess.S +++ b/arch/riscv/lib/uaccess.S @@ -17,8 +17,11 @@ ENTRY(__asm_copy_from_user) li t6, SR_SUM csrs CSR_STATUS, t6
- /* Save for return value */ - mv t5, a2 + /* + * Save the terminal address which will be used to compute the number + * of bytes copied in case of a fixup exception. + */ + add t5, a0, a2
/* * Register allocation for code below: @@ -176,7 +179,7 @@ ENTRY(__asm_copy_from_user) 10: /* Disable access to user memory */ csrc CSR_STATUS, t6 - mv a0, t5 + sub a0, t5, a0 ret ENDPROC(__asm_copy_to_user) ENDPROC(__asm_copy_from_user) @@ -228,7 +231,7 @@ ENTRY(__clear_user) 11: /* Disable access to user memory */ csrc CSR_STATUS, t6 - mv a0, a1 + sub a0, a3, a0 ret ENDPROC(__clear_user) EXPORT_SYMBOL(__clear_user) diff --git a/arch/um/os-Linux/user_syms.c b/arch/um/os-Linux/user_syms.c index 9b62a9d352b3..a310ae27b479 100644 --- a/arch/um/os-Linux/user_syms.c +++ b/arch/um/os-Linux/user_syms.c @@ -37,13 +37,6 @@ EXPORT_SYMBOL(vsyscall_ehdr); EXPORT_SYMBOL(vsyscall_end); #endif
-/* Export symbols used by GCC for the stack protector. */ -extern void __stack_smash_handler(void *) __attribute__((weak)); -EXPORT_SYMBOL(__stack_smash_handler); - -extern long __guard __attribute__((weak)); -EXPORT_SYMBOL(__guard); - #ifdef _FORTIFY_SOURCE extern int __sprintf_chk(char *str, int flag, size_t len, const char *format); EXPORT_SYMBOL(__sprintf_chk); diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h index 117903881fe4..ce8f50192ae3 100644 --- a/arch/x86/include/asm/entry-common.h +++ b/arch/x86/include/asm/entry-common.h @@ -92,6 +92,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, static __always_inline void arch_exit_to_user_mode(void) { mds_user_clear_cpu_buffers(); + amd_clear_divider(); } #define arch_exit_to_user_mode arch_exit_to_user_mode
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index e1e7b319fe78..8da84e1e5658 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -268,9 +268,9 @@ .endm
#ifdef CONFIG_CPU_UNRET_ENTRY -#define CALL_ZEN_UNTRAIN_RET "call zen_untrain_ret" +#define CALL_UNTRAIN_RET "call entry_untrain_ret" #else -#define CALL_ZEN_UNTRAIN_RET "" +#define CALL_UNTRAIN_RET "" #endif
/* @@ -278,7 +278,7 @@ * return thunk isn't mapped into the userspace tables (then again, AMD * typically has NO_MELTDOWN). * - * While zen_untrain_ret() doesn't clobber anything but requires stack, + * While retbleed_untrain_ret() doesn't clobber anything but requires stack, * entry_ibpb() will clobber AX, CX, DX. * * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point @@ -289,14 +289,20 @@ defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO) VALIDATE_UNRET_END ALTERNATIVE_3 "", \ - CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \ + CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \ "call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \ __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH #endif +.endm
-#ifdef CONFIG_CPU_SRSO - ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \ - "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS +.macro UNTRAIN_RET_VM +#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \ + defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO) + VALIDATE_UNRET_END + ALTERNATIVE_3 "", \ + CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \ + "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT, \ + __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH #endif .endm
@@ -305,15 +311,10 @@ defined(CONFIG_CALL_DEPTH_TRACKING) VALIDATE_UNRET_END ALTERNATIVE_3 "", \ - CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \ + CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \ "call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \ __stringify(RESET_CALL_DEPTH_FROM_CALL), X86_FEATURE_CALL_DEPTH #endif - -#ifdef CONFIG_CPU_SRSO - ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \ - "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS -#endif .endm
@@ -337,17 +338,24 @@ extern retpoline_thunk_t __x86_indirect_thunk_array[]; extern retpoline_thunk_t __x86_indirect_call_thunk_array[]; extern retpoline_thunk_t __x86_indirect_jump_thunk_array[];
+#ifdef CONFIG_RETHUNK extern void __x86_return_thunk(void); -extern void zen_untrain_ret(void); +#else +static inline void __x86_return_thunk(void) {} +#endif + +extern void retbleed_return_thunk(void); +extern void srso_return_thunk(void); +extern void srso_alias_return_thunk(void); + +extern void retbleed_untrain_ret(void); extern void srso_untrain_ret(void); -extern void srso_untrain_ret_alias(void); +extern void srso_alias_untrain_ret(void); + +extern void entry_untrain_ret(void); extern void entry_ibpb(void);
-#ifdef CONFIG_CALL_THUNKS extern void (*x86_return_thunk)(void); -#else -#define x86_return_thunk (&__x86_return_thunk) -#endif
#ifdef CONFIG_CALL_DEPTH_TRACKING extern void __x86_return_skl(void); @@ -474,9 +482,6 @@ enum ssb_mitigation { SPEC_STORE_BYPASS_SECCOMP, };
-extern char __indirect_thunk_start[]; -extern char __indirect_thunk_end[]; - static __always_inline void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature) { diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index f615e0cb6d93..94b42fbb6ffa 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -571,10 +571,6 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
#ifdef CONFIG_RETHUNK
-#ifdef CONFIG_CALL_THUNKS -void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk; -#endif - /* * Rewrite the compiler generated return thunk tail-calls. * diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 0b5f33cb32b5..13b0da82cb5f 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -1329,3 +1329,4 @@ void noinstr amd_clear_divider(void) asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0) :: "a" (0), "d" (0), "r" (1)); } +EXPORT_SYMBOL_GPL(amd_clear_divider); diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index f3d627901d89..d5319779da58 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -63,6 +63,8 @@ EXPORT_SYMBOL_GPL(x86_pred_cmd);
static DEFINE_MUTEX(spec_ctrl_mutex);
+void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk; + /* Update SPEC_CTRL MSR and its cached copy unconditionally */ static void update_spec_ctrl(u64 val) { @@ -165,8 +167,13 @@ void __init cpu_select_mitigations(void) md_clear_select_mitigation(); srbds_select_mitigation(); l1d_flush_select_mitigation(); - gds_select_mitigation(); + + /* + * srso_select_mitigation() depends and must run after + * retbleed_select_mitigation(). + */ srso_select_mitigation(); + gds_select_mitigation(); }
/* @@ -1035,6 +1042,9 @@ static void __init retbleed_select_mitigation(void) setup_force_cpu_cap(X86_FEATURE_RETHUNK); setup_force_cpu_cap(X86_FEATURE_UNRET);
+ if (IS_ENABLED(CONFIG_RETHUNK)) + x86_return_thunk = retbleed_return_thunk; + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) pr_err(RETBLEED_UNTRAIN_MSG); @@ -1044,6 +1054,7 @@ static void __init retbleed_select_mitigation(void)
case RETBLEED_MITIGATION_IBPB: setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); + setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); mitigate_smt = true; break;
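For orientation on the bugs.c hunks above: the x86_return_thunk pointer now has an unconditional default of __x86_return_thunk, and each mitigation that needs an untraining return sequence overrides it at boot (retbleed here, SRSO in the hunks further below); the boot-time patching described in the alternative.c comment above ("Rewrite the compiler generated return thunk tail-calls") then points compiler-generated returns at whatever the pointer ends up as. Below is a rough, userspace-compilable sketch of that "default pointer, overridden by the selected mitigation" idea; the helper names and flags are invented for illustration and are not the kernel API.

    #include <stdio.h>
    #include <stdbool.h>

    /* Stand-ins for the real thunks; illustrative only. */
    static void default_return_thunk(void)  { puts("plain RET"); }
    static void retbleed_thunk(void)        { puts("retbleed untraining RET"); }
    static void srso_thunk(void)            { puts("SRSO safe RET"); }

    /* Mirrors the new unconditional definition in bugs.c. */
    static void (*return_thunk)(void) = default_return_thunk;

    /* Hypothetical selection, in the retbleed-then-SRSO order used above. */
    static void select_mitigations(bool retbleed_unret, bool srso_safe_ret)
    {
            if (retbleed_unret)
                    return_thunk = retbleed_thunk;
            if (srso_safe_ret)
                    return_thunk = srso_thunk;
    }

    int main(void)
    {
            select_mitigations(true, false);
            return_thunk();     /* what the patched-in returns would reach */
            return 0;
    }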
@@ -2417,9 +2428,10 @@ static void __init srso_select_mitigation(void) * Zen1/2 with SMT off aren't vulnerable after the right * IBPB microcode has been applied. */ - if ((boot_cpu_data.x86 < 0x19) && - (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED))) + if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) { setup_force_cpu_cap(X86_FEATURE_SRSO_NO); + return; + } }
if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { @@ -2448,11 +2460,15 @@ static void __init srso_select_mitigation(void) * like ftrace, static_call, etc. */ setup_force_cpu_cap(X86_FEATURE_RETHUNK); + setup_force_cpu_cap(X86_FEATURE_UNRET);
- if (boot_cpu_data.x86 == 0x19) + if (boot_cpu_data.x86 == 0x19) { setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS); - else + x86_return_thunk = srso_alias_return_thunk; + } else { setup_force_cpu_cap(X86_FEATURE_SRSO); + x86_return_thunk = srso_return_thunk; + } srso_mitigation = SRSO_MITIGATION_SAFE_RET; } else { pr_err("WARNING: kernel not compiled with CPU_SRSO.\n"); @@ -2701,6 +2717,9 @@ static ssize_t gds_show_state(char *buf)
static ssize_t srso_show_state(char *buf) { + if (boot_cpu_has(X86_FEATURE_SRSO_NO)) + return sysfs_emit(buf, "Mitigation: SMT disabled\n"); + return sysfs_emit(buf, "%s%s\n", srso_strings[srso_mitigation], (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode")); diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 57b0037d0a99..517821b48391 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -226,7 +226,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real) }
/* Check whether insn is indirect jump */ -static int __insn_is_indirect_jump(struct insn *insn) +static int insn_is_indirect_jump(struct insn *insn) { return ((insn->opcode.bytes[0] == 0xff && (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */ @@ -260,26 +260,6 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len) return (start <= target && target <= start + len); }
-static int insn_is_indirect_jump(struct insn *insn) -{ - int ret = __insn_is_indirect_jump(insn); - -#ifdef CONFIG_RETPOLINE - /* - * Jump to x86_indirect_thunk_* is treated as an indirect jump. - * Note that even with CONFIG_RETPOLINE=y, the kernel compiled with - * older gcc may use indirect jump. So we add this check instead of - * replace indirect-jump check. - */ - if (!ret) - ret = insn_jump_into_range(insn, - (unsigned long)__indirect_thunk_start, - (unsigned long)__indirect_thunk_end - - (unsigned long)__indirect_thunk_start); -#endif - return ret; -} - /* Decode whole function to ensure any instructions don't jump into target */ static int can_optimize(unsigned long paddr) { @@ -334,9 +314,21 @@ static int can_optimize(unsigned long paddr) /* Recover address */ insn.kaddr = (void *)addr; insn.next_byte = (void *)(addr + insn.length); - /* Check any instructions don't jump into target */ - if (insn_is_indirect_jump(&insn) || - insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE, + /* + * Check any instructions don't jump into target, indirectly or + * directly. + * + * The indirect case is present to handle a code with jump + * tables. When the kernel uses retpolines, the check should in + * theory additionally look for jumps to indirect thunks. + * However, the kernel built with retpolines or IBT has jump + * tables disabled so the check can be skipped altogether. + */ + if (!IS_ENABLED(CONFIG_RETPOLINE) && + !IS_ENABLED(CONFIG_X86_KERNEL_IBT) && + insn_is_indirect_jump(&insn)) + return 0; + if (insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE, DISP32_SIZE)) return 0; addr += insn.length; diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c index b70670a98597..77a9316da435 100644 --- a/arch/x86/kernel/static_call.c +++ b/arch/x86/kernel/static_call.c @@ -186,6 +186,19 @@ EXPORT_SYMBOL_GPL(arch_static_call_transform); */ bool __static_call_fixup(void *tramp, u8 op, void *dest) { + unsigned long addr = (unsigned long)tramp; + /* + * Not all .return_sites are a static_call trampoline (most are not). + * Check if the 3 bytes after the return are still kernel text, if not, + * then this definitely is not a trampoline and we need not worry + * further. + * + * This avoids the memcmp() below tripping over pagefaults etc.. + */ + if (((addr >> PAGE_SHIFT) != ((addr + 7) >> PAGE_SHIFT)) && + !kernel_text_address(addr + 7)) + return false; + if (memcmp(tramp+5, tramp_ud, 3)) { /* Not a trampoline site, not our problem. */ return false; diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 1885326a8f65..4a817d20ce3b 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -206,8 +206,6 @@ DEFINE_IDTENTRY(exc_divide_error) { do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE, FPE_INTDIV, error_get_trap_addr(regs)); - - amd_clear_divider(); }
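On the __static_call_fixup() hunk above: the added guard avoids a memcmp() that could fault by first checking whether the eight bytes starting at the trampoline stay within one page, and only consulting kernel_text_address() for the last byte when they straddle a boundary. A small self-contained illustration of the page-crossing test (PAGE_SHIFT is hard-coded to 12 here purely for the demo):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    #define PAGE_SHIFT 12   /* 4 KiB pages, for illustration only */

    /* True if the byte range [addr, addr + len) crosses a page boundary. */
    static bool crosses_page(uint64_t addr, unsigned int len)
    {
            return (addr >> PAGE_SHIFT) != ((addr + len - 1) >> PAGE_SHIFT);
    }

    int main(void)
    {
            /* The fixup inspects bytes addr .. addr + 7 (5-byte call + 3-byte tail). */
            printf("0x1ff8: crosses = %d\n", crosses_page(0x1ff8, 8)); /* 0 */
            printf("0x1ff9: crosses = %d\n", crosses_page(0x1ff9, 8)); /* 1 */
            return 0;
    }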
DEFINE_IDTENTRY(exc_overflow) diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index bac2e2949f01..83d41c2601d7 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -133,27 +133,25 @@ SECTIONS KPROBES_TEXT SOFTIRQENTRY_TEXT #ifdef CONFIG_RETPOLINE - __indirect_thunk_start = .; - *(.text.__x86.indirect_thunk) - *(.text.__x86.return_thunk) - __indirect_thunk_end = .; + *(.text..__x86.indirect_thunk) + *(.text..__x86.return_thunk) #endif STATIC_CALL_TEXT
ALIGN_ENTRY_TEXT_BEGIN #ifdef CONFIG_CPU_SRSO - *(.text.__x86.rethunk_untrain) + *(.text..__x86.rethunk_untrain) #endif
ENTRY_TEXT
#ifdef CONFIG_CPU_SRSO /* - * See the comment above srso_untrain_ret_alias()'s + * See the comment above srso_alias_untrain_ret()'s * definition. */ - . = srso_untrain_ret_alias | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20); - *(.text.__x86.rethunk_safe) + . = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20); + *(.text..__x86.rethunk_safe) #endif ALIGN_ENTRY_TEXT_END *(.gnu.warning) @@ -522,8 +520,8 @@ INIT_PER_CPU(irq_stack_backing_store); "fixed_percpu_data is not at start of per-cpu area"); #endif
- #ifdef CONFIG_RETHUNK -. = ASSERT((__ret & 0x3f) == 0, "__ret not cacheline-aligned"); +#ifdef CONFIG_RETHUNK +. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned"); . = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned"); #endif
@@ -538,8 +536,8 @@ INIT_PER_CPU(irq_stack_backing_store); * Instead do: (A | B) - (A & B) in order to compute the XOR * of the two function addresses: */ -. = ASSERT(((ABSOLUTE(srso_untrain_ret_alias) | srso_safe_ret_alias) - - (ABSOLUTE(srso_untrain_ret_alias) & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)), +. = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) - + (ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)), "SRSO function pair won't alias"); #endif
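The linker-script assertion above checks that srso_alias_untrain_ret and srso_alias_safe_ret differ in exactly bits 2, 8, 14 and 20 - the pattern that makes the two routines alias in the Zen3/4 branch target buffer - and, as the quoted comment explains, it computes the XOR as (A | B) - (A & B). A tiny standalone check of that identity; the addresses are made up, the real ones come from the linker layout:

    #include <stdint.h>
    #include <stdio.h>

    #define SRSO_ALIAS_BITS ((1u << 2) | (1u << 8) | (1u << 14) | (1u << 20))

    int main(void)
    {
            uint64_t untrain = 0xffffffff82200000ull;       /* hypothetical: 2M aligned, alias bits clear */
            uint64_t safe    = untrain | SRSO_ALIAS_BITS;   /* same 2M page, alias bits set */

            /* (A | B) - (A & B) equals A ^ B. */
            uint64_t diff = (untrain | safe) - (untrain & safe);

            printf("diff = %#llx, matches = %d\n",
                   (unsigned long long)diff, diff == SRSO_ALIAS_BITS);
            return 0;
    }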
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index af7b968f5570..c3b557aca249 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4034,6 +4034,8 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
guest_state_enter_irqoff();
+ amd_clear_divider(); + if (sev_es_guest(vcpu->kvm)) __svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted); else diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S index 265452fc9ebe..ef2ebabb059c 100644 --- a/arch/x86/kvm/svm/vmenter.S +++ b/arch/x86/kvm/svm/vmenter.S @@ -222,10 +222,7 @@ SYM_FUNC_START(__svm_vcpu_run) * because interrupt handlers won't sanitize 'ret' if the return is * from the kernel. */ - UNTRAIN_RET - - /* SRSO */ - ALTERNATIVE "", "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT + UNTRAIN_RET_VM
/* * Clear all general purpose registers except RSP and RAX to prevent @@ -362,7 +359,7 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run) * because interrupt handlers won't sanitize RET if the return is * from the kernel. */ - UNTRAIN_RET + UNTRAIN_RET_VM
/* "Pop" @spec_ctrl_intercepted. */ pop %_ASM_BX diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index 2cff585f22f2..cd86aeb5fdd3 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -13,7 +13,7 @@ #include <asm/frame.h> #include <asm/nops.h>
- .section .text.__x86.indirect_thunk + .section .text..__x86.indirect_thunk
.macro POLINE reg @@ -133,75 +133,106 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array) #ifdef CONFIG_RETHUNK
/* - * srso_untrain_ret_alias() and srso_safe_ret_alias() are placed at + * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at * special addresses: * - * - srso_untrain_ret_alias() is 2M aligned - * - srso_safe_ret_alias() is also in the same 2M page but bits 2, 8, 14 + * - srso_alias_untrain_ret() is 2M aligned + * - srso_alias_safe_ret() is also in the same 2M page but bits 2, 8, 14 * and 20 in its virtual address are set (while those bits in the - * srso_untrain_ret_alias() function are cleared). + * srso_alias_untrain_ret() function are cleared). * * This guarantees that those two addresses will alias in the branch * target buffer of Zen3/4 generations, leading to any potential * poisoned entries at that BTB slot to get evicted. * - * As a result, srso_safe_ret_alias() becomes a safe return. + * As a result, srso_alias_safe_ret() becomes a safe return. */ #ifdef CONFIG_CPU_SRSO - .section .text.__x86.rethunk_untrain + .section .text..__x86.rethunk_untrain
-SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) +SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) + UNWIND_HINT_FUNC ANNOTATE_NOENDBR ASM_NOP2 lfence - jmp __x86_return_thunk -SYM_FUNC_END(srso_untrain_ret_alias) -__EXPORT_THUNK(srso_untrain_ret_alias) - - .section .text.__x86.rethunk_safe + jmp srso_alias_return_thunk +SYM_FUNC_END(srso_alias_untrain_ret) +__EXPORT_THUNK(srso_alias_untrain_ret) + + .section .text..__x86.rethunk_safe +#else +/* dummy definition for alternatives */ +SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) + ANNOTATE_UNRET_SAFE + ret + int3 +SYM_FUNC_END(srso_alias_untrain_ret) #endif
-/* Needs a definition for the __x86_return_thunk alternative below. */ -SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) -#ifdef CONFIG_CPU_SRSO - add $8, %_ASM_SP +SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE) + lea 8(%_ASM_SP), %_ASM_SP UNWIND_HINT_FUNC -#endif ANNOTATE_UNRET_SAFE ret int3 -SYM_FUNC_END(srso_safe_ret_alias) +SYM_FUNC_END(srso_alias_safe_ret)
- .section .text.__x86.return_thunk + .section .text..__x86.return_thunk + +SYM_CODE_START(srso_alias_return_thunk) + UNWIND_HINT_FUNC + ANNOTATE_NOENDBR + call srso_alias_safe_ret + ud2 +SYM_CODE_END(srso_alias_return_thunk) + +/* + * Some generic notes on the untraining sequences: + * + * They are interchangeable when it comes to flushing potentially wrong + * RET predictions from the BTB. + * + * The SRSO Zen1/2 (MOVABS) untraining sequence is longer than the + * Retbleed sequence because the return sequence done there + * (srso_safe_ret()) is longer and the return sequence must fully nest + * (end before) the untraining sequence. Therefore, the untraining + * sequence must fully overlap the return sequence. + * + * Regarding alignment - the instructions which need to be untrained, + * must all start at a cacheline boundary for Zen1/2 generations. That + * is, instruction sequences starting at srso_safe_ret() and + * the respective instruction sequences at retbleed_return_thunk() + * must start at a cacheline boundary. + */
/* * Safety details here pertain to the AMD Zen{1,2} microarchitecture: - * 1) The RET at __x86_return_thunk must be on a 64 byte boundary, for + * 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for * alignment within the BTB. - * 2) The instruction at zen_untrain_ret must contain, and not + * 2) The instruction at retbleed_untrain_ret must contain, and not * end with, the 0xc3 byte of the RET. * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread * from re-poisioning the BTB prediction. */ .align 64 - .skip 64 - (__ret - zen_untrain_ret), 0xcc -SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) + .skip 64 - (retbleed_return_thunk - retbleed_untrain_ret), 0xcc +SYM_START(retbleed_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) ANNOTATE_NOENDBR /* - * As executed from zen_untrain_ret, this is: + * As executed from retbleed_untrain_ret, this is: * * TEST $0xcc, %bl * LFENCE - * JMP __x86_return_thunk + * JMP retbleed_return_thunk * * Executing the TEST instruction has a side effect of evicting any BTB * prediction (potentially attacker controlled) attached to the RET, as - * __x86_return_thunk + 1 isn't an instruction boundary at the moment. + * retbleed_return_thunk + 1 isn't an instruction boundary at the moment. */ .byte 0xf6
/* - * As executed from __x86_return_thunk, this is a plain RET. + * As executed from retbleed_return_thunk, this is a plain RET. * * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8. * @@ -213,13 +244,13 @@ SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) * With SMT enabled and STIBP active, a sibling thread cannot poison * RET's prediction to a type of its choice, but can evict the * prediction due to competitive sharing. If the prediction is - * evicted, __x86_return_thunk will suffer Straight Line Speculation + * evicted, retbleed_return_thunk will suffer Straight Line Speculation * which will be contained safely by the INT3. */ -SYM_INNER_LABEL(__ret, SYM_L_GLOBAL) +SYM_INNER_LABEL(retbleed_return_thunk, SYM_L_GLOBAL) ret int3 -SYM_CODE_END(__ret) +SYM_CODE_END(retbleed_return_thunk)
/* * Ensure the TEST decoding / BTB invalidation is complete. @@ -230,16 +261,16 @@ SYM_CODE_END(__ret) * Jump back and execute the RET in the middle of the TEST instruction. * INT3 is for SLS protection. */ - jmp __ret + jmp retbleed_return_thunk int3 -SYM_FUNC_END(zen_untrain_ret) -__EXPORT_THUNK(zen_untrain_ret) +SYM_FUNC_END(retbleed_untrain_ret) +__EXPORT_THUNK(retbleed_untrain_ret)
/* - * SRSO untraining sequence for Zen1/2, similar to zen_untrain_ret() + * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret() * above. On kernel entry, srso_untrain_ret() is executed which is a * - * movabs $0xccccccc308c48348,%rax + * movabs $0xccccc30824648d48,%rax * * and when the return thunk executes the inner label srso_safe_ret() * later, it is a stack manipulation and a RET which is mispredicted and @@ -251,22 +282,44 @@ SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) ANNOTATE_NOENDBR .byte 0x48, 0xb8
+/* + * This forces the function return instruction to speculate into a trap + * (UD2 in srso_return_thunk() below). This RET will then mispredict + * and execution will continue at the return site read from the top of + * the stack. + */ SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL) - add $8, %_ASM_SP + lea 8(%_ASM_SP), %_ASM_SP ret int3 int3 - int3 + /* end of movabs */ lfence call srso_safe_ret - int3 + ud2 SYM_CODE_END(srso_safe_ret) SYM_FUNC_END(srso_untrain_ret) __EXPORT_THUNK(srso_untrain_ret)
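A note on the immediate change in the srso_untrain_ret hunk above (0xccccccc308c48348 becomes 0xccccc30824648d48): the bytes of that movabs immediate are exactly what the CPU executes when control lands on srso_safe_ret in the middle of the instruction, so replacing the stack adjustment "add $8,%rsp" with the flags-preserving "lea 8(%rsp),%rsp" also changes the encoded constant. A small endianness check of the new byte sequence; the encodings are from the x86-64 manual and this is only a reading aid, not kernel code:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t imm = 0xccccc30824648d48ull;   /* new movabs immediate */
            unsigned char b[8];

            /* x86 stores immediates little-endian, lowest byte first. */
            for (int i = 0; i < 8; i++)
                    b[i] = (imm >> (8 * i)) & 0xff;

            /* Expected: 48 8d 64 24 08   lea 0x8(%rsp),%rsp
             *           c3               ret
             *           cc cc            int3, int3            */
            for (int i = 0; i < 8; i++)
                    printf("%02x ", b[i]);
            printf("\n");
            return 0;
    }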
-SYM_FUNC_START(__x86_return_thunk) - ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \ - "call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS +SYM_CODE_START(srso_return_thunk) + UNWIND_HINT_FUNC + ANNOTATE_NOENDBR + call srso_safe_ret + ud2 +SYM_CODE_END(srso_return_thunk) + +SYM_FUNC_START(entry_untrain_ret) + ALTERNATIVE_2 "jmp retbleed_untrain_ret", \ + "jmp srso_untrain_ret", X86_FEATURE_SRSO, \ + "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS +SYM_FUNC_END(entry_untrain_ret) +__EXPORT_THUNK(entry_untrain_ret) + +SYM_CODE_START(__x86_return_thunk) + UNWIND_HINT_FUNC + ANNOTATE_NOENDBR + ANNOTATE_UNRET_SAFE + ret int3 SYM_CODE_END(__x86_return_thunk) EXPORT_SYMBOL(__x86_return_thunk) diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index fc49be622e05..9faafcd10e17 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -136,7 +136,9 @@ static void blkg_free_workfn(struct work_struct *work) blkcg_policy[i]->pd_free_fn(blkg->pd[i]); if (blkg->parent) blkg_put(blkg->parent); + spin_lock_irq(&q->queue_lock); list_del_init(&blkg->q_node); + spin_unlock_irq(&q->queue_lock); mutex_unlock(&q->blkcg_mutex);
blk_put_queue(q); diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c index ad9844c5b40c..e6468eab2681 100644 --- a/block/blk-crypto-fallback.c +++ b/block/blk-crypto-fallback.c @@ -78,7 +78,7 @@ static struct blk_crypto_fallback_keyslot { struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX]; } *blk_crypto_keyslots;
-static struct blk_crypto_profile blk_crypto_fallback_profile; +static struct blk_crypto_profile *blk_crypto_fallback_profile; static struct workqueue_struct *blk_crypto_wq; static mempool_t *blk_crypto_bounce_page_pool; static struct bio_set crypto_bio_split; @@ -292,7 +292,7 @@ static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr) * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for * this bio's algorithm and key. */ - blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile, + blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile, bc->bc_key, &slot); if (blk_st != BLK_STS_OK) { src_bio->bi_status = blk_st; @@ -395,7 +395,7 @@ static void blk_crypto_fallback_decrypt_bio(struct work_struct *work) * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for * this bio's algorithm and key. */ - blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile, + blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile, bc->bc_key, &slot); if (blk_st != BLK_STS_OK) { bio->bi_status = blk_st; @@ -499,7 +499,7 @@ bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr) return false; }
- if (!__blk_crypto_cfg_supported(&blk_crypto_fallback_profile, + if (!__blk_crypto_cfg_supported(blk_crypto_fallback_profile, &bc->bc_key->crypto_cfg)) { bio->bi_status = BLK_STS_NOTSUPP; return false; @@ -526,7 +526,7 @@ bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key) { - return __blk_crypto_evict_key(&blk_crypto_fallback_profile, key); + return __blk_crypto_evict_key(blk_crypto_fallback_profile, key); }
static bool blk_crypto_fallback_inited; @@ -534,7 +534,6 @@ static int blk_crypto_fallback_init(void) { int i; int err; - struct blk_crypto_profile *profile = &blk_crypto_fallback_profile;
if (blk_crypto_fallback_inited) return 0; @@ -545,18 +544,27 @@ static int blk_crypto_fallback_init(void) if (err) goto out;
- err = blk_crypto_profile_init(profile, blk_crypto_num_keyslots); - if (err) + /* Dynamic allocation is needed because of lockdep_register_key(). */ + blk_crypto_fallback_profile = + kzalloc(sizeof(*blk_crypto_fallback_profile), GFP_KERNEL); + if (!blk_crypto_fallback_profile) { + err = -ENOMEM; goto fail_free_bioset; + } + + err = blk_crypto_profile_init(blk_crypto_fallback_profile, + blk_crypto_num_keyslots); + if (err) + goto fail_free_profile; err = -ENOMEM;
- profile->ll_ops = blk_crypto_fallback_ll_ops; - profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE; + blk_crypto_fallback_profile->ll_ops = blk_crypto_fallback_ll_ops; + blk_crypto_fallback_profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
/* All blk-crypto modes have a crypto API fallback. */ for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) - profile->modes_supported[i] = 0xFFFFFFFF; - profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0; + blk_crypto_fallback_profile->modes_supported[i] = 0xFFFFFFFF; + blk_crypto_fallback_profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
blk_crypto_wq = alloc_workqueue("blk_crypto_wq", WQ_UNBOUND | WQ_HIGHPRI | @@ -597,7 +605,9 @@ static int blk_crypto_fallback_init(void) fail_free_wq: destroy_workqueue(blk_crypto_wq); fail_destroy_profile: - blk_crypto_profile_destroy(profile); + blk_crypto_profile_destroy(blk_crypto_fallback_profile); +fail_free_profile: + kfree(blk_crypto_fallback_profile); fail_free_bioset: bioset_exit(&crypto_bio_split); out: diff --git a/drivers/accel/habanalabs/common/device.c b/drivers/accel/habanalabs/common/device.c index fabfc501ef54..a39dd346a167 100644 --- a/drivers/accel/habanalabs/common/device.c +++ b/drivers/accel/habanalabs/common/device.c @@ -981,6 +981,18 @@ static void device_early_fini(struct hl_device *hdev) hdev->asic_funcs->early_fini(hdev); }
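The heartbeat-failure message reworked just below tells a wedged device apart from a dead PCIe link by re-reading the vendor ID from config space: when the link is down such reads typically complete as all 1s rather than the expected 0x1da3. A minimal sketch of that style of check; the read helper is a stand-in, not the kernel's pci_read_config_word():

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    #define PCI_VENDOR_ID_HABANALABS 0x1da3

    /* Stand-in for a config-space read; a broken link usually returns all 1s. */
    static uint16_t read_vendor_id(bool link_up)
    {
            return link_up ? PCI_VENDOR_ID_HABANALABS : 0xffff;
    }

    int main(void)
    {
            printf("link up:   healthy = %d\n", read_vendor_id(true)  == PCI_VENDOR_ID_HABANALABS);
            printf("link down: healthy = %d\n", read_vendor_id(false) == PCI_VENDOR_ID_HABANALABS);
            return 0;
    }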
+static bool is_pci_link_healthy(struct hl_device *hdev) +{ + u16 vendor_id; + + if (!hdev->pdev) + return false; + + pci_read_config_word(hdev->pdev, PCI_VENDOR_ID, &vendor_id); + + return (vendor_id == PCI_VENDOR_ID_HABANALABS); +} + static void hl_device_heartbeat(struct work_struct *work) { struct hl_device *hdev = container_of(work, struct hl_device, @@ -995,7 +1007,8 @@ static void hl_device_heartbeat(struct work_struct *work) goto reschedule;
if (hl_device_operational(hdev, NULL)) - dev_err(hdev->dev, "Device heartbeat failed!\n"); + dev_err(hdev->dev, "Device heartbeat failed! PCI link is %s\n", + is_pci_link_healthy(hdev) ? "healthy" : "broken");
info.err_type = HL_INFO_FW_HEARTBEAT_ERR; info.event_mask = &event_mask; diff --git a/drivers/accel/habanalabs/common/habanalabs.h b/drivers/accel/habanalabs/common/habanalabs.h index eaae69a9f817..7f5d1b6e3fb0 100644 --- a/drivers/accel/habanalabs/common/habanalabs.h +++ b/drivers/accel/habanalabs/common/habanalabs.h @@ -36,6 +36,8 @@ struct hl_device; struct hl_fpriv;
+#define PCI_VENDOR_ID_HABANALABS 0x1da3 + /* Use upper bits of mmap offset to store habana driver specific information. * bits[63:59] - Encode mmap type * bits[45:0] - mmap offset value diff --git a/drivers/accel/habanalabs/common/habanalabs_drv.c b/drivers/accel/habanalabs/common/habanalabs_drv.c index d9df64e75f33..70fb2df9a93b 100644 --- a/drivers/accel/habanalabs/common/habanalabs_drv.c +++ b/drivers/accel/habanalabs/common/habanalabs_drv.c @@ -13,6 +13,7 @@
#include <linux/pci.h> #include <linux/module.h> +#include <linux/vmalloc.h>
#define CREATE_TRACE_POINTS #include <trace/events/habanalabs.h> @@ -54,8 +55,6 @@ module_param(boot_error_status_mask, ulong, 0444); MODULE_PARM_DESC(boot_error_status_mask, "Mask of the error status during device CPU boot (If bitX is cleared then error X is masked. Default all 1's)");
-#define PCI_VENDOR_ID_HABANALABS 0x1da3 - #define PCI_IDS_GOYA 0x0001 #define PCI_IDS_GAUDI 0x1000 #define PCI_IDS_GAUDI_SEC 0x1010 @@ -220,6 +219,7 @@ int hl_device_open(struct inode *inode, struct file *filp)
hl_debugfs_add_file(hpriv);
+ vfree(hdev->captured_err_info.page_fault_info.user_mappings); memset(&hdev->captured_err_info, 0, sizeof(hdev->captured_err_info)); atomic_set(&hdev->captured_err_info.cs_timeout.write_enable, 1); hdev->captured_err_info.undef_opcode.write_enable = true; diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c index cfbc92da426f..388abd40024b 100644 --- a/drivers/accel/qaic/qaic_control.c +++ b/drivers/accel/qaic/qaic_control.c @@ -392,18 +392,31 @@ static int find_and_map_user_pages(struct qaic_device *qdev, struct qaic_manage_trans_dma_xfer *in_trans, struct ioctl_resources *resources, struct dma_xfer *xfer) { + u64 xfer_start_addr, remaining, end, total; unsigned long need_pages; struct page **page_list; unsigned long nr_pages; struct sg_table *sgt; - u64 xfer_start_addr; int ret; int i;
- xfer_start_addr = in_trans->addr + resources->xferred_dma_size; + if (check_add_overflow(in_trans->addr, resources->xferred_dma_size, &xfer_start_addr)) + return -EINVAL;
- need_pages = DIV_ROUND_UP(in_trans->size + offset_in_page(xfer_start_addr) - - resources->xferred_dma_size, PAGE_SIZE); + if (in_trans->size < resources->xferred_dma_size) + return -EINVAL; + remaining = in_trans->size - resources->xferred_dma_size; + if (remaining == 0) + return 0; + + if (check_add_overflow(xfer_start_addr, remaining, &end)) + return -EINVAL; + + total = remaining + offset_in_page(xfer_start_addr); + if (total >= SIZE_MAX) + return -EINVAL; + + need_pages = DIV_ROUND_UP(total, PAGE_SIZE);
nr_pages = need_pages;
@@ -435,7 +448,7 @@ static int find_and_map_user_pages(struct qaic_device *qdev,
ret = sg_alloc_table_from_pages(sgt, page_list, nr_pages, offset_in_page(xfer_start_addr), - in_trans->size - resources->xferred_dma_size, GFP_KERNEL); + remaining, GFP_KERNEL); if (ret) { ret = -ENOMEM; goto free_sgt; @@ -566,9 +579,6 @@ static int encode_dma(struct qaic_device *qdev, void *trans, struct wrapper_list QAIC_MANAGE_EXT_MSG_LENGTH) return -ENOMEM;
- if (in_trans->addr + in_trans->size < in_trans->addr || !in_trans->size) - return -EINVAL; - xfer = kmalloc(sizeof(*xfer), GFP_KERNEL); if (!xfer) return -ENOMEM; diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c index e9a1cb779b30..6b6d981a71be 100644 --- a/drivers/accel/qaic/qaic_data.c +++ b/drivers/accel/qaic/qaic_data.c @@ -1021,6 +1021,7 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi bo->dbc = dbc; srcu_read_unlock(&dbc->ch_lock, rcu_id); drm_gem_object_put(obj); + kfree(slice_ent); srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id); srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
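The find_and_map_user_pages() changes above validate the user-supplied address and transfer size with check_add_overflow() before any page arithmetic, replacing the ad-hoc wrap test that was removed from encode_dma(). For readers unfamiliar with the helper, it wraps the compiler builtin shown below; the example values are arbitrary:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    int main(void)
    {
            uint64_t addr = UINT64_MAX - 0x10;      /* arbitrary user-controlled value */
            uint64_t size = 0x100;
            uint64_t end;

            /* check_add_overflow() in the kernel is built on this builtin: it
             * returns true when addr + size wraps, with 'end' holding the
             * wrapped result. */
            bool overflow = __builtin_add_overflow(addr, size, &end);

            printf("overflow = %d, end = %#llx\n", overflow, (unsigned long long)end);
            return overflow ? 1 : 0;                /* the driver returns -EINVAL here */
    }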
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 50e23762ec5e..025e803ba55c 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -613,6 +613,9 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0489, 0xe0d9), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0f5), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x13d3, 0x3568), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 21fe9854703f..4cb23b9e06ea 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -2142,6 +2142,8 @@ static int sysc_reset(struct sysc *ddata) sysc_val = sysc_read_sysconfig(ddata); sysc_val |= sysc_mask; sysc_write(ddata, sysc_offset, sysc_val); + /* Flush posted write */ + sysc_val = sysc_read_sysconfig(ddata); }
if (ddata->cfg.srst_udelay) diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index 538bd677c254..7a4d1a478e33 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c @@ -479,7 +479,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net, struct sk_buff *skb, u16 source_node_id, bool is_broadcast, u16 ether_type) { - int status; + int status, len;
switch (ether_type) { case ETH_P_ARP: @@ -533,13 +533,15 @@ static int fwnet_finish_incoming_packet(struct net_device *net, } skb->protocol = protocol; } + + len = skb->len; status = netif_rx(skb); if (status == NET_RX_DROP) { net->stats.rx_errors++; net->stats.rx_dropped++; } else { net->stats.rx_packets++; - net->stats.rx_bytes += skb->len; + net->stats.rx_bytes += len; }
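The firewire net hunk above caches skb->len in a local before calling netif_rx(): once the skb is handed to the network stack it may be freed immediately, so reading its length afterwards for the rx_bytes counter risked a use-after-free. The same "snapshot what you need before transferring ownership" pattern in a minimal standalone form; the buffer type and consumer are invented for the illustration:

    #include <stdio.h>
    #include <stdlib.h>

    struct buf {
            size_t len;
            unsigned char *data;
    };

    /* Consumer takes ownership and may free the buffer right away. */
    static void consume(struct buf *b)
    {
            free(b->data);
            free(b);
    }

    int main(void)
    {
            struct buf *b = calloc(1, sizeof(*b));
            b->len  = 1500;
            b->data = malloc(b->len);

            size_t len = b->len;    /* snapshot before handing the buffer off */
            consume(b);             /* b must not be touched after this point */

            printf("accounted %zu bytes\n", len);
            return 0;
    }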
return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index a989ae72a58a..0c023269aada 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -189,7 +189,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p, uint64_t *chunk_array_user; uint64_t *chunk_array; uint32_t uf_offset = 0; - unsigned int size; + size_t size; int ret; int i;
@@ -1625,15 +1625,15 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev, continue;
r = dma_fence_wait_timeout(fence, true, timeout); + if (r > 0 && fence->error) + r = fence->error; + dma_fence_put(fence); if (r < 0) return r;
if (r == 0) break; - - if (fence->error) - return fence->error; }
memset(wait, 0, sizeof(*wait)); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c index c6d4d41c4393..23d054526e7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c @@ -106,3 +106,41 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, ttm_eu_backoff_reservation(&ticket, &list); return 0; } + +int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct amdgpu_bo *bo, struct amdgpu_bo_va *bo_va, + uint64_t csa_addr) +{ + struct ww_acquire_ctx ticket; + struct list_head list; + struct amdgpu_bo_list_entry pd; + struct ttm_validate_buffer csa_tv; + int r; + + INIT_LIST_HEAD(&list); + INIT_LIST_HEAD(&csa_tv.head); + csa_tv.bo = &bo->tbo; + csa_tv.num_shared = 1; + + list_add(&csa_tv.head, &list); + amdgpu_vm_get_pd_bo(vm, &list, &pd); + + r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); + if (r) { + DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r); + return r; + } + + r = amdgpu_vm_bo_unmap(adev, bo_va, csa_addr); + if (r) { + DRM_ERROR("failed to do bo_unmap on static CSA, err=%d\n", r); + ttm_eu_backoff_reservation(&ticket, &list); + return r; + } + + amdgpu_vm_bo_del(adev, bo_va); + + ttm_eu_backoff_reservation(&ticket, &list); + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h index 524b4437a021..7dfc1f2012eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h @@ -34,6 +34,9 @@ int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va, uint64_t csa_addr, uint32_t size); +int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct amdgpu_bo *bo, struct amdgpu_bo_va *bo_va, + uint64_t csa_addr); void amdgpu_free_static_csa(struct amdgpu_bo **bo);
#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 44a902d9b5c7..3108f5219cf3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4250,6 +4250,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
cancel_delayed_work_sync(&adev->delayed_init_work); + flush_delayed_work(&adev->gfx.gfx_off_delay_work);
amdgpu_ras_suspend(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index a7d250809da9..b9ba01b4c992 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -555,6 +555,41 @@ int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev) return 0; }
+/** + * amdgpu_fence_need_ring_interrupt_restore - helper function to check whether + * fence driver interrupts need to be restored. + * + * @ring: ring that to be checked + * + * Interrupts for rings that belong to GFX IP don't need to be restored + * when the target power state is s0ix. + * + * Return true if need to restore interrupts, false otherwise. + */ +static bool amdgpu_fence_need_ring_interrupt_restore(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + bool is_gfx_power_domain = false; + + switch (ring->funcs->type) { + case AMDGPU_RING_TYPE_SDMA: + /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */ + if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0)) + is_gfx_power_domain = true; + break; + case AMDGPU_RING_TYPE_GFX: + case AMDGPU_RING_TYPE_COMPUTE: + case AMDGPU_RING_TYPE_KIQ: + case AMDGPU_RING_TYPE_MES: + is_gfx_power_domain = true; + break; + default: + break; + } + + return !(adev->in_s0ix && is_gfx_power_domain); +} + /** * amdgpu_fence_driver_hw_fini - tear down the fence driver * for all possible rings. @@ -583,7 +618,8 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev) amdgpu_fence_driver_force_completion(ring);
if (!drm_dev_is_unplugged(adev_to_drm(adev)) && - ring->fence_drv.irq_src) + ring->fence_drv.irq_src && + amdgpu_fence_need_ring_interrupt_restore(ring)) amdgpu_irq_put(adev, ring->fence_drv.irq_src, ring->fence_drv.irq_type);
@@ -658,7 +694,8 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev) continue;
/* enable the interrupt */ - if (ring->fence_drv.irq_src) + if (ring->fence_drv.irq_src && + amdgpu_fence_need_ring_interrupt_restore(ring)) amdgpu_irq_get(adev, ring->fence_drv.irq_src, ring->fence_drv.irq_type); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index f3f541ba0aca..bff5b6eac39b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -589,15 +589,8 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
if (adev->gfx.gfx_off_req_count == 0 && !adev->gfx.gfx_off_state) { - /* If going to s2idle, no need to wait */ - if (adev->in_s0ix) { - if (!amdgpu_dpm_set_powergating_by_smu(adev, - AMD_IP_BLOCK_TYPE_GFX, true)) - adev->gfx.gfx_off_state = true; - } else { - schedule_delayed_work(&adev->gfx.gfx_off_delay_work, + schedule_delayed_work(&adev->gfx.gfx_off_delay_work, delay); - } } } else { if (adev->gfx.gfx_off_req_count == 0) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index fafebec5b7b6..9581c020d815 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -124,7 +124,6 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev) continue;
for (k = 0; k < src->num_types; ++k) { - atomic_set(&src->enabled_types[k], 0); r = src->funcs->set(adev, src, k, AMDGPU_IRQ_STATE_DISABLE); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 0efb38539d70..724e80c19297 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -1284,12 +1284,12 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL) amdgpu_vce_free_handles(adev, file_priv);
- if (amdgpu_mcbp) { - /* TODO: how to handle reserve failure */ - BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true)); - amdgpu_vm_bo_del(adev, fpriv->csa_va); + if (fpriv->csa_va) { + uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK; + + WARN_ON(amdgpu_unmap_static_csa(adev, &fpriv->vm, adev->virt.csa_obj, + fpriv->csa_va, csa_addr)); fpriv->csa_va = NULL; - amdgpu_bo_unreserve(adev->virt.csa_obj); }
pasid = fpriv->vm.pasid; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index db820331f2c6..39e54685653c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -520,6 +520,8 @@ static int psp_sw_fini(void *handle) kfree(cmd); cmd = NULL;
+ psp_free_shared_bufs(psp); + if (psp->km_ring.ring_mem) amdgpu_bo_free_kernel(&adev->firmware.rbuf, &psp->km_ring.ring_mem_mc_addr, @@ -2657,8 +2659,6 @@ static int psp_hw_fini(void *handle)
psp_ring_destroy(psp, PSP_RING_TYPE__KM);
- psp_free_shared_bufs(psp); - return 0; }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 49de3a3eebc7..de04606c2061 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -361,6 +361,8 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) amdgpu_bo_free_kernel(&ring->ring_obj, &ring->gpu_addr, (void **)&ring->ring); + } else { + kfree(ring->fence_drv.fences); }
dma_fence_put(ring->vmid_wait); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 23f52150ebef..fd029d91a340 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1367,6 +1367,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
bo_va->ref_count = 1; + bo_va->last_pt_update = dma_fence_get_stub(); INIT_LIST_HEAD(&bo_va->valids); INIT_LIST_HEAD(&bo_va->invalids);
@@ -2088,7 +2089,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) vm->update_funcs = &amdgpu_vm_cpu_funcs; else vm->update_funcs = &amdgpu_vm_sdma_funcs; - vm->last_update = NULL; + + vm->last_update = dma_fence_get_stub(); vm->last_unlocked = dma_fence_get_stub(); vm->last_tlb_flush = dma_fence_get_stub();
@@ -2213,7 +2215,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) goto unreserve_bo;
dma_fence_put(vm->last_update); - vm->last_update = NULL; + vm->last_update = dma_fence_get_stub(); vm->is_compute_context = true;
/* Free the shadow bo for compute VM */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index bdce36754436..4dd9a85f5c72 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1653,11 +1653,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0) init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
- /* Disable SubVP + DRR config by default */ - init_data.flags.disable_subvp_drr = true; - if (amdgpu_dc_feature_mask & DC_ENABLE_SUBVP_DRR) - init_data.flags.disable_subvp_drr = false; - init_data.flags.seamless_boot_edp_requested = false;
if (check_seamless_boot_capability(adev)) { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index 8d9444db092a..eea103908b09 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -233,6 +233,32 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base) DC_FP_END(); }
+static void dcn32_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr, + struct dc_state *context, + int ref_dtbclk_khz) +{ + struct dccg *dccg = clk_mgr->dccg; + uint32_t tg_mask = 0; + int i; + + for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + struct dtbclk_dto_params dto_params = {0}; + + /* use mask to program DTO once per tg */ + if (pipe_ctx->stream_res.tg && + !(tg_mask & (1 << pipe_ctx->stream_res.tg->inst))) { + tg_mask |= (1 << pipe_ctx->stream_res.tg->inst); + + dto_params.otg_inst = pipe_ctx->stream_res.tg->inst; + dto_params.ref_dtbclk_khz = ref_dtbclk_khz; + + dccg->funcs->set_dtbclk_dto(clk_mgr->dccg, &dto_params); + //dccg->funcs->set_audio_dtbclk_dto(clk_mgr->dccg, &dto_params); + } + } +} + /* Since DPPCLK request to PMFW needs to be exact (due to DPP DTO programming), * update DPPCLK to be the exact frequency that will be set after the DPPCLK * divider is updated. This will prevent rounding issues that could cause DPP @@ -570,6 +596,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, /* DCCG requires KHz precision for DTBCLK */ clk_mgr_base->clks.ref_dtbclk_khz = dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DTBCLK, khz_to_mhz_ceil(new_clocks->ref_dtbclk_khz)); + dcn32_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz); }
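dcn32_update_clocks_update_dtb_dto() above walks every pipe but uses tg_mask so that each timing generator's DTO is programmed only once, since several pipes can share one TG. The bookkeeping pattern reduced to a standalone example; the pipe-to-TG mapping is arbitrary:

    #include <stdio.h>
    #include <stdint.h>

    #define NUM_PIPES 6

    int main(void)
    {
            /* TG instance driven by each pipe; pipes 0/1 and 3/4 share a TG. */
            int pipe_tg[NUM_PIPES] = { 0, 0, 1, 2, 2, 3 };
            uint32_t tg_mask = 0;

            for (int i = 0; i < NUM_PIPES; i++) {
                    int tg = pipe_tg[i];

                    if (tg_mask & (1u << tg))
                            continue;           /* this TG was already programmed */
                    tg_mask |= 1u << tg;

                    printf("program DTO for TG %d (first seen on pipe %d)\n", tg, i);
            }
            return 0;
    }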
if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 1d8c5805ef20..77ef474ced07 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -712,7 +712,7 @@ static const struct dc_debug_options debug_defaults_drv = { .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, - .pipe_split_policy = MPC_SPLIT_DYNAMIC, + .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c index 4c2fdfea162f..65c1d754e2d6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c @@ -47,6 +47,14 @@ void dccg31_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ if (dccg->dpp_clock_gated[dpp_inst]) { + /* + * Do not update the DPPCLK DTO if the clock is stopped. + * It is treated the same as if the pipe itself were in PG. + */ + return; + } + if (dccg->ref_dppclk && req_dppclk) { int ref_dppclk = dccg->ref_dppclk; int modulo, phase; diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c index de7bfba2c179..afeb9f4d5344 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c @@ -322,6 +322,9 @@ static void dccg314_dpp_root_clock_control( { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ if (dccg->dpp_clock_gated[dpp_inst] != clock_on) + return; + if (clock_on) { /* turn off the DTO and leave phase/modulo at max */ REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_ENABLE[dpp_inst], 0); @@ -335,6 +338,8 @@ static void dccg314_dpp_root_clock_control( DPPCLK0_DTO_PHASE, 0, DPPCLK0_DTO_MODULO, 1); } + + dccg->dpp_clock_gated[dpp_inst] = !clock_on; }
static const struct dccg_funcs dccg314_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c index abeeede38fb3..653b5f15d4ca 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c @@ -921,6 +921,22 @@ static const struct dc_debug_options debug_defaults_drv = { .afmt = true, } }, + + .root_clock_optimization = { + .bits = { + .dpp = true, + .dsc = false, + .hdmistream = false, + .hdmichar = false, + .dpstream = false, + .symclk32_se = false, + .symclk32_le = false, + .symclk_fe = false, + .physymclk = false, + .dpiasymclk = false, + } + }, + .seamless_boot_odm_combine = true };
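The dcn314 debug defaults above enable root clock gating bit by bit, and the resource-construct hunk that follows switches it all back off with a single store to .u32All. That works because the flag is a union of named bits overlaid on one 32-bit word, roughly like the sketch below; the member names are illustrative, not the real DC structure:

    #include <stdio.h>

    union clock_opt {
            struct {
                    unsigned int dpp        : 1;
                    unsigned int dsc        : 1;
                    unsigned int hdmistream : 1;
                    /* ... remaining gating bits ... */
            } bits;
            unsigned int u32All;
    };

    int main(void)
    {
            union clock_opt opt = { .bits = { .dpp = 1 } };

            printf("before: u32All = %#x\n", opt.u32All);
            opt.u32All = 0;     /* one store clears every optimization bit */
            printf("after:  dpp = %u\n", opt.bits.dpp);
            return 0;
    }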
@@ -1920,6 +1936,10 @@ static bool dcn314_resource_construct( dc->debug = debug_defaults_drv; else dc->debug = debug_defaults_diags; + + /* Disable root clock optimization */ + dc->debug.root_clock_optimization.u32All = 0; + // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c index 7661f8946aa3..9ec767ebf5d1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c @@ -1097,10 +1097,6 @@ void dcn20_calculate_dlg_params(struct dc *dc, context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest; - if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid) - dcn20_adjust_freesync_v_startup( - &context->res_ctx.pipe_ctx[i].stream->timing, - &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
pipe_idx++; } @@ -1914,6 +1910,7 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co int vlevel = 0; int pipe_split_from[MAX_PIPES]; int pipe_cnt = 0; + int i = 0; display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC); DC_LOGGER_INIT(dc->ctx->logger);
@@ -1937,6 +1934,15 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate); dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+ for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid) + dcn20_adjust_freesync_v_startup( + &context->res_ctx.pipe_ctx[i].stream->timing, + &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start); + } + BW_VAL_TRACE_END_WATERMARKS();
goto validate_out; @@ -2209,6 +2215,7 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc, int vlevel = 0; int pipe_split_from[MAX_PIPES]; int pipe_cnt = 0; + int i = 0; display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC); DC_LOGGER_INIT(dc->ctx->logger);
@@ -2237,6 +2244,15 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc, dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate); dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+ for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid) + dcn20_adjust_freesync_v_startup( + &context->res_ctx.pipe_ctx[i].stream->timing, + &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start); + } + BW_VAL_TRACE_END_WATERMARKS();
goto validate_out; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index d8b4119820bf..1bfda6e2b307 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -880,10 +880,6 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struc int16_t stretched_drr_us = 0; int16_t drr_stretched_vblank_us = 0; int16_t max_vblank_mallregion = 0; - const struct dc_config *config = &dc->config; - - if (config->disable_subvp_drr) - return false;
// Find SubVP pipe for (i = 0; i < dc->res_pool->pipe_count; i++) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c index d75248b6cae9..9a5150e96017 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c @@ -811,7 +811,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman v->SwathHeightC[k], TWait, (v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ || - v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= MIN_DCFCLK_FREQ_MHZ) ? + v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ) ? mode_lib->vba.ip.min_prefetch_in_strobe_us : 0, /* Output */ &v->DSTXAfterScaler[k], @@ -3311,7 +3311,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l v->swath_width_chroma_ub_this_state[k], v->SwathHeightYThisState[k], v->SwathHeightCThisState[k], v->TWait, - (v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= MIN_DCFCLK_FREQ_MHZ) ? + (v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ) ? mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
/* Output */ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h index d98e36a9a09c..c4745d63039b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h @@ -53,7 +53,7 @@ #define BPP_BLENDED_PIPE 0xffffffff
#define MEM_STROBE_FREQ_MHZ 1600 -#define MIN_DCFCLK_FREQ_MHZ 200 +#define DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ 300 #define MEM_STROBE_MAX_DELIVERY_TIME_US 60.0
struct display_mode_lib; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index ad6acd1b34e1..9651cccb084a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -68,6 +68,7 @@ struct dccg { const struct dccg_funcs *funcs; int pipe_dppclk_khz[MAX_PIPES]; int ref_dppclk; + bool dpp_clock_gated[MAX_PIPES]; //int dtbclk_khz[MAX_PIPES];/* TODO needs to be removed */ //int audio_dtbclk_khz;/* TODO needs to be removed */ //int ref_dtbclk_khz;/* TODO needs to be removed */ diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index e4a22c68517d..f175e65b853a 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -240,7 +240,6 @@ enum DC_FEATURE_MASK { DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default DC_PSR_ALLOW_SMU_OPT = (1 << 7), //0x80, disabled by default DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), //0x100, disabled by default - DC_ENABLE_SUBVP_DRR = (1 << 9), // 0x200, disabled by default };
enum DC_DEBUG_MASK { diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index ea03e8d9a3f6..818379276a58 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -1573,9 +1573,9 @@ static int smu_disable_dpms(struct smu_context *smu)
/* * For SMU 13.0.4/11, PMFW will handle the features disablement properly - * for gpu reset case. Driver involvement is unnecessary. + * for gpu reset and S0i3 cases. Driver involvement is unnecessary. */ - if (amdgpu_in_reset(adev)) { + if (amdgpu_in_reset(adev) || adev->in_s0ix) { switch (adev->ip_versions[MP1_HWIP][0]) { case IP_VERSION(13, 0, 4): case IP_VERSION(13, 0, 11): diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 0cda3b276f61..f0800c0c5168 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -588,7 +588,9 @@ static int sienna_cichlid_tables_init(struct smu_context *smu) return -ENOMEM; }
-static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *smu) +static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *smu, + bool use_metrics_v3, + bool use_metrics_v2) { struct smu_table_context *smu_table= &smu->smu_table; SmuMetricsExternal_t *metrics_ext = @@ -596,13 +598,11 @@ static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *s uint32_t throttler_status = 0; int i;
- if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) && - (smu->smc_fw_version >= 0x3A4900)) { + if (use_metrics_v3) { for (i = 0; i < THROTTLER_COUNT; i++) throttler_status |= (metrics_ext->SmuMetrics_V3.ThrottlingPercentage[i] ? 1U << i : 0); - } else if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) && - (smu->smc_fw_version >= 0x3A4300)) { + } else if (use_metrics_v2) { for (i = 0; i < THROTTLER_COUNT; i++) throttler_status |= (metrics_ext->SmuMetrics_V2.ThrottlingPercentage[i] ? 1U << i : 0); @@ -864,7 +864,7 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu, metrics->TemperatureVrSoc) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; case METRICS_THROTTLER_STATUS: - *value = sienna_cichlid_get_throttler_status_locked(smu); + *value = sienna_cichlid_get_throttler_status_locked(smu, use_metrics_v3, use_metrics_v2); break; case METRICS_CURR_FANSPEED: *value = use_metrics_v3 ? metrics_v3->CurrFanSpeed : @@ -4017,7 +4017,7 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu, gpu_metrics->current_dclk1 = use_metrics_v3 ? metrics_v3->CurrClock[PPCLK_DCLK_1] : use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_DCLK_1] : metrics->CurrClock[PPCLK_DCLK_1];
- gpu_metrics->throttle_status = sienna_cichlid_get_throttler_status_locked(smu); + gpu_metrics->throttle_status = sienna_cichlid_get_throttler_status_locked(smu, use_metrics_v3, use_metrics_v2); gpu_metrics->indep_throttle_status = smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status, sienna_cichlid_throttler_map); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 0454da505687..e1a04461ba88 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -3424,6 +3424,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto connector->base.id, connector->name); return NULL; } + if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) { + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Composite sync not supported\n", + connector->base.id, connector->name); + }
/* it is incorrect if hsync/vsync width is zero */ if (!hsync_pulse_width || !vsync_pulse_width) { @@ -3470,27 +3474,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto if (info->quirks & EDID_QUIRK_DETAILED_SYNC_PP) { mode->flags |= DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC; } else { - switch (pt->misc & DRM_EDID_PT_SYNC_MASK) { - case DRM_EDID_PT_ANALOG_CSYNC: - case DRM_EDID_PT_BIPOLAR_ANALOG_CSYNC: - drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Analog composite sync!\n", - connector->base.id, connector->name); - mode->flags |= DRM_MODE_FLAG_CSYNC | DRM_MODE_FLAG_NCSYNC; - break; - case DRM_EDID_PT_DIGITAL_CSYNC: - drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Digital composite sync!\n", - connector->base.id, connector->name); - mode->flags |= DRM_MODE_FLAG_CSYNC; - mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ? - DRM_MODE_FLAG_PCSYNC : DRM_MODE_FLAG_NCSYNC; - break; - case DRM_EDID_PT_DIGITAL_SEPARATE_SYNC: - mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ? - DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC; - mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ? - DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC; - break; - } + mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ? + DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC; + mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ? + DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC; }
set_size: diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index e12ba458636c..5ee0479ae6de 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -2752,7 +2752,7 @@ static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void) __drm_atomic_helper_connector_reset(&sdvo_connector->base.base, &conn_state->base.base);
- INIT_LIST_HEAD(&sdvo_connector->base.panel.fixed_modes); + intel_panel_init_alloc(&sdvo_connector->base);
return sdvo_connector; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c index cc18e8f66486..78822331f1b7 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c @@ -470,12 +470,19 @@ int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val) ret = slpc_set_param(slpc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY, val); - if (ret) + if (ret) { guc_probe_error(slpc_to_guc(slpc), "Failed to set efficient freq(%d): %pe\n", val, ERR_PTR(ret)); - else + } else { slpc->ignore_eff_freq = val;
+ /* Set min to RPn when we disable efficient freq */ + if (val) + ret = slpc_set_param(slpc, + SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ, + slpc->min_freq); + } + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&slpc->lock); return ret; @@ -602,9 +609,8 @@ static int slpc_set_softlimits(struct intel_guc_slpc *slpc) return ret;
if (!slpc->min_freq_softlimit) { - ret = intel_guc_slpc_get_min_freq(slpc, &slpc->min_freq_softlimit); - if (unlikely(ret)) - return ret; + /* Min softlimit is initialized to RPn */ + slpc->min_freq_softlimit = slpc->min_freq; slpc_to_gt(slpc)->defaults.min_freq = slpc->min_freq_softlimit; } else { return intel_guc_slpc_set_min_freq(slpc, @@ -755,6 +761,9 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc) return ret; }
+ /* Set cached value of ignore efficient freq */ + intel_guc_slpc_set_ignore_eff_freq(slpc, slpc->ignore_eff_freq); + /* Revert SLPC min/max to softlimits if necessary */ ret = slpc_set_softlimits(slpc); if (unlikely(ret)) { @@ -765,9 +774,6 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc) /* Set cached media freq ratio mode */ intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode);
- /* Set cached value of ignore efficient freq */ - intel_guc_slpc_set_ignore_eff_freq(slpc, slpc->ignore_eff_freq); - return 0; }
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index a2e0033e8a26..622f6eb9a8bf 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -1408,8 +1408,7 @@ nouveau_connector_create(struct drm_device *dev, ret = nvif_conn_ctor(&disp->disp, nv_connector->base.name, nv_connector->index, &nv_connector->conn); if (ret) { - kfree(nv_connector); - return ERR_PTR(ret); + goto drm_conn_err; }
ret = nvif_conn_event_ctor(&nv_connector->conn, "kmsHotplug", @@ -1426,8 +1425,7 @@ nouveau_connector_create(struct drm_device *dev, if (ret) { nvif_event_dtor(&nv_connector->hpd); nvif_conn_dtor(&nv_connector->conn); - kfree(nv_connector); - return ERR_PTR(ret); + goto drm_conn_err; } } } @@ -1475,4 +1473,9 @@ nouveau_connector_create(struct drm_device *dev,
drm_connector_register(connector); return connector; + +drm_conn_err: + drm_connector_cleanup(connector); + kfree(nv_connector); + return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index e02249b212c2..cf6b146acc32 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -969,21 +969,21 @@ static const struct panel_desc auo_g104sn02 = { .connector_type = DRM_MODE_CONNECTOR_LVDS, };
-static const struct drm_display_mode auo_g121ean01_mode = { - .clock = 66700, - .hdisplay = 1280, - .hsync_start = 1280 + 58, - .hsync_end = 1280 + 58 + 8, - .htotal = 1280 + 58 + 8 + 70, - .vdisplay = 800, - .vsync_start = 800 + 6, - .vsync_end = 800 + 6 + 4, - .vtotal = 800 + 6 + 4 + 10, +static const struct display_timing auo_g121ean01_timing = { + .pixelclock = { 60000000, 74400000, 90000000 }, + .hactive = { 1280, 1280, 1280 }, + .hfront_porch = { 20, 50, 100 }, + .hback_porch = { 20, 50, 100 }, + .hsync_len = { 30, 100, 200 }, + .vactive = { 800, 800, 800 }, + .vfront_porch = { 2, 10, 25 }, + .vback_porch = { 2, 10, 25 }, + .vsync_len = { 4, 18, 50 }, };
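Background on the panel-simple.c conversion above: struct display_timing describes each parameter as a { minimum, typical, maximum } range instead of the single values a drm_display_mode carries, letting the panel advertise the full window its datasheet allows. A hedged sketch of the convention (the numbers below are invented for illustration, not taken from any datasheet):

    #include <video/display_timing.h>

    /* every triplet is { min, typ, max } */
    static const struct display_timing example_timing = {
            .pixelclock   = { 60000000, 70000000, 80000000 },
            .hactive      = { 1280, 1280, 1280 },
            .hfront_porch = { 10, 40, 80 },
            .hback_porch  = { 10, 40, 80 },
            .hsync_len    = { 20, 60, 120 },
            .vactive      = { 800, 800, 800 },
            .vfront_porch = { 3, 8, 20 },
            .vback_porch  = { 3, 8, 20 },
            .vsync_len    = { 4, 10, 40 },
    };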
static const struct panel_desc auo_g121ean01 = { - .modes = &auo_g121ean01_mode, - .num_modes = 1, + .timings = &auo_g121ean01_timing, + .num_timings = 1, .bpc = 8, .size = { .width = 261, diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index ea993d7162e8..307a890fde13 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -310,7 +310,7 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev, u32 domain, size_t size, struct qxl_surface *surf, - struct qxl_bo **qobj, + struct drm_gem_object **gobj, uint32_t *handle); void qxl_gem_object_free(struct drm_gem_object *gobj); int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv); diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c index d636ba685451..17df5c7ccf69 100644 --- a/drivers/gpu/drm/qxl/qxl_dumb.c +++ b/drivers/gpu/drm/qxl/qxl_dumb.c @@ -34,6 +34,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv, { struct qxl_device *qdev = to_qxl(dev); struct qxl_bo *qobj; + struct drm_gem_object *gobj; uint32_t handle; int r; struct qxl_surface surf; @@ -62,11 +63,13 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
r = qxl_gem_object_create_with_handle(qdev, file_priv, QXL_GEM_DOMAIN_CPU, - args->size, &surf, &qobj, + args->size, &surf, &gobj, &handle); if (r) return r; + qobj = gem_to_qxl_bo(gobj); qobj->is_dumb = true; + drm_gem_object_put(gobj); args->pitch = pitch; args->handle = handle; return 0; diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c index a08da0bd9098..fc5e3763c359 100644 --- a/drivers/gpu/drm/qxl/qxl_gem.c +++ b/drivers/gpu/drm/qxl/qxl_gem.c @@ -72,32 +72,41 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size, return 0; }
+/* + * If the caller passed a valid gobj pointer, it is responsible to call + * drm_gem_object_put() when it no longer needs to access the object. + * + * If gobj is NULL, it is handled internally. + */ int qxl_gem_object_create_with_handle(struct qxl_device *qdev, struct drm_file *file_priv, u32 domain, size_t size, struct qxl_surface *surf, - struct qxl_bo **qobj, + struct drm_gem_object **gobj, uint32_t *handle) { - struct drm_gem_object *gobj; int r; + struct drm_gem_object *local_gobj;
- BUG_ON(!qobj); BUG_ON(!handle);
r = qxl_gem_object_create(qdev, size, 0, domain, false, false, surf, - &gobj); + &local_gobj); if (r) return -ENOMEM; - r = drm_gem_handle_create(file_priv, gobj, handle); + r = drm_gem_handle_create(file_priv, local_gobj, handle); if (r) return r; - /* drop reference from allocate - handle holds it now */ - *qobj = gem_to_qxl_bo(gobj); - drm_gem_object_put(gobj); + + if (gobj) + *gobj = local_gobj; + else + /* drop reference from allocate - handle holds it now */ + drm_gem_object_put(local_gobj); + return 0; }
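The reworked qxl_gem_object_create_with_handle() above supports two calling conventions: callers that only need the handle pass a NULL gobj and never touch the reference, while callers that need the object receive it with an extra reference they must drop themselves. A sketch of both patterns, mirroring the qxl_ioctl.c and qxl_dumb.c hunks (the wrapper function names are invented):

    /* 1. Handle only: no object pointer, no reference to manage. */
    static int make_handle_only(struct qxl_device *qdev, struct drm_file *file_priv,
                                size_t size, uint32_t *handle)
    {
            return qxl_gem_object_create_with_handle(qdev, file_priv,
                                                     QXL_GEM_DOMAIN_VRAM, size,
                                                     NULL, NULL, handle);
    }

    /* 2. Handle plus object: drop the extra reference once done with it. */
    static int make_dumb_bo(struct qxl_device *qdev, struct drm_file *file_priv,
                            size_t size, uint32_t *handle)
    {
            struct drm_gem_object *gobj;
            int r;

            r = qxl_gem_object_create_with_handle(qdev, file_priv,
                                                   QXL_GEM_DOMAIN_CPU, size,
                                                   NULL, &gobj, handle);
            if (r)
                    return r;

            gem_to_qxl_bo(gobj)->is_dumb = true;
            drm_gem_object_put(gobj);       /* the handle keeps the object alive */
            return 0;
    }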
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index 30f58b21372a..dd0f834d881c 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c @@ -38,7 +38,6 @@ int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr struct qxl_device *qdev = to_qxl(dev); struct drm_qxl_alloc *qxl_alloc = data; int ret; - struct qxl_bo *qobj; uint32_t handle; u32 domain = QXL_GEM_DOMAIN_VRAM;
@@ -50,7 +49,7 @@ int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr domain, qxl_alloc->size, NULL, - &qobj, &handle); + NULL, &handle); if (ret) { DRM_ERROR("%s: failed to create gem ret=%d\n", __func__, ret); @@ -386,7 +385,6 @@ int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *fi { struct qxl_device *qdev = to_qxl(dev); struct drm_qxl_alloc_surf *param = data; - struct qxl_bo *qobj; int handle; int ret; int size, actual_stride; @@ -406,7 +404,7 @@ int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *fi QXL_GEM_DOMAIN_SURFACE, size, &surf, - &qobj, &handle); + NULL, &handle); if (ret) { DRM_ERROR("%s: failed to create gem ret=%d\n", __func__, ret); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index d6d29be6b4f4..7e175dbfd892 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -223,20 +223,6 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) * DU channels that have a display PLL can't use the internal * system clock, and have no internal clock divider. */ - - /* - * The H3 ES1.x exhibits dot clock duty cycle stability issues. - * We can work around them by configuring the DPLL to twice the - * desired frequency, coupled with a /2 post-divider. Restrict - * the workaround to H3 ES1.x as ES2.0 and all other SoCs have - * no post-divider when a display PLL is present (as shown by - * the workaround breaking HDMI output on M3-W during testing). - */ - if (rcdu->info->quirks & RCAR_DU_QUIRK_H3_ES1_PCLK_STABILITY) { - target *= 2; - div = 1; - } - extclk = clk_get_rate(rcrtc->extclock); rcar_du_dpll_divider(rcrtc, &dpll, extclk, target);
@@ -245,30 +231,13 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) | DPLLCR_N(dpll.n) | DPLLCR_M(dpll.m) | DPLLCR_STBY;
- if (rcrtc->index == 1) { + if (rcrtc->index == 1) dpllcr |= DPLLCR_PLCS1 | DPLLCR_INCS_DOTCLKIN1; - } else { - dpllcr |= DPLLCR_PLCS0_PLL + else + dpllcr |= DPLLCR_PLCS0 | DPLLCR_INCS_DOTCLKIN0;
- /* - * On ES2.x we have a single mux controlled via bit 21, - * which selects between DCLKIN source (bit 21 = 0) and - * a PLL source (bit 21 = 1), where the PLL is always - * PLL1. - * - * On ES1.x we have an additional mux, controlled - * via bit 20, for choosing between PLL0 (bit 20 = 0) - * and PLL1 (bit 20 = 1). We always want to use PLL1, - * so on ES1.x, in addition to setting bit 21, we need - * to set the bit 20. - */ - - if (rcdu->info->quirks & RCAR_DU_QUIRK_H3_ES1_PLL) - dpllcr |= DPLLCR_PLCS0_H3ES1X_PLL1; - } - rcar_du_group_write(rcrtc->group, DPLLCR, dpllcr);
escr = ESCR_DCLKSEL_DCLKIN | div; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index b9a94c5260e9..1ffde19cb87f 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -16,7 +16,6 @@ #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/slab.h> -#include <linux/sys_soc.h> #include <linux/wait.h>
#include <drm/drm_atomic_helper.h> @@ -387,43 +386,6 @@ static const struct rcar_du_device_info rcar_du_r8a7795_info = { .dpll_mask = BIT(2) | BIT(1), };
-static const struct rcar_du_device_info rcar_du_r8a7795_es1_info = { - .gen = 3, - .features = RCAR_DU_FEATURE_CRTC_IRQ - | RCAR_DU_FEATURE_CRTC_CLOCK - | RCAR_DU_FEATURE_VSP1_SOURCE - | RCAR_DU_FEATURE_INTERLACED - | RCAR_DU_FEATURE_TVM_SYNC, - .quirks = RCAR_DU_QUIRK_H3_ES1_PCLK_STABILITY - | RCAR_DU_QUIRK_H3_ES1_PLL, - .channels_mask = BIT(3) | BIT(2) | BIT(1) | BIT(0), - .routes = { - /* - * R8A7795 has one RGB output, two HDMI outputs and one - * LVDS output. - */ - [RCAR_DU_OUTPUT_DPAD0] = { - .possible_crtcs = BIT(3), - .port = 0, - }, - [RCAR_DU_OUTPUT_HDMI0] = { - .possible_crtcs = BIT(1), - .port = 1, - }, - [RCAR_DU_OUTPUT_HDMI1] = { - .possible_crtcs = BIT(2), - .port = 2, - }, - [RCAR_DU_OUTPUT_LVDS0] = { - .possible_crtcs = BIT(0), - .port = 3, - }, - }, - .num_lvds = 1, - .num_rpf = 5, - .dpll_mask = BIT(2) | BIT(1), -}; - static const struct rcar_du_device_info rcar_du_r8a7796_info = { .gen = 3, .features = RCAR_DU_FEATURE_CRTC_IRQ @@ -614,11 +576,6 @@ static const struct of_device_id rcar_du_of_table[] = {
MODULE_DEVICE_TABLE(of, rcar_du_of_table);
-static const struct soc_device_attribute rcar_du_soc_table[] = { - { .soc_id = "r8a7795", .revision = "ES1.*", .data = &rcar_du_r8a7795_es1_info }, - { /* sentinel */ } -}; - const char *rcar_du_output_name(enum rcar_du_output output) { static const char * const names[] = { @@ -707,7 +664,6 @@ static void rcar_du_shutdown(struct platform_device *pdev)
static int rcar_du_probe(struct platform_device *pdev) { - const struct soc_device_attribute *soc_attr; struct rcar_du_device *rcdu; unsigned int mask; int ret; @@ -725,10 +681,6 @@ static int rcar_du_probe(struct platform_device *pdev)
rcdu->info = of_device_get_match_data(rcdu->dev);
- soc_attr = soc_device_match(rcar_du_soc_table); - if (soc_attr) - rcdu->info = soc_attr->data; - platform_set_drvdata(pdev, rcdu);
/* I/O resources */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h index acc3673fefe1..5cfa2bb7ad93 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h @@ -34,8 +34,6 @@ struct rcar_du_device; #define RCAR_DU_FEATURE_NO_BLENDING BIT(5) /* PnMR.SPIM does not have ALP nor EOR bits */
#define RCAR_DU_QUIRK_ALIGN_128B BIT(0) /* Align pitches to 128 bytes */ -#define RCAR_DU_QUIRK_H3_ES1_PCLK_STABILITY BIT(1) /* H3 ES1 has pclk stability issue */ -#define RCAR_DU_QUIRK_H3_ES1_PLL BIT(2) /* H3 ES1 PLL setup differs from non-ES1 */
enum rcar_du_output { RCAR_DU_OUTPUT_DPAD0, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h index 6c750fab6ebb..391de6661d8b 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h @@ -283,8 +283,7 @@ #define DPLLCR 0x20044 #define DPLLCR_CODE (0x95 << 24) #define DPLLCR_PLCS1 (1 << 23) -#define DPLLCR_PLCS0_PLL (1 << 21) -#define DPLLCR_PLCS0_H3ES1X_PLL1 (1 << 20) +#define DPLLCR_PLCS0 (1 << 21) #define DPLLCR_CLKE (1 << 18) #define DPLLCR_FDPLL(n) ((n) << 12) #define DPLLCR_N(n) ((n) << 5) diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index e0a8890a62e2..3e2a31d8190e 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -448,6 +448,12 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity) drm_sched_rq_update_fifo(entity, next->submit_ts); }
+ /* Jobs and entities might have different lifecycles. Since we're + * removing the job from the entity's queue, set the job's entity pointer + * to NULL to prevent any future access of the entity through this job. + */ + sched_job->entity = NULL; + return sched_job; }
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index aea5a90ff98b..cdd67676c3d1 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -42,6 +42,10 @@ * the hardware. * * The jobs in a entity are always scheduled in the order that they were pushed. + * + * Note that once a job was taken from the entities queue and pushed to the + * hardware, i.e. the pending queue, the entity must not be referenced anymore + * through the jobs entity pointer. */
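The two scheduler hunks above spell out a lifetime rule: once drm_sched_entity_pop_job() has handed a job to the hardware, the entity may be torn down independently, so the scheduler now clears sched_job->entity and drivers must not reach the entity through a job anymore. A hedged sketch of what a driver callback should therefore avoid (struct my_job and my_free_job are illustrative names, not a real driver):

    #include <drm/gpu_scheduler.h>
    #include <linux/slab.h>

    struct my_job {
            struct drm_sched_job base;
            /* driver-private state */
    };

    static void my_free_job(struct drm_sched_job *s_job)
    {
            struct my_job *job = container_of(s_job, struct my_job, base);

            /*
             * Do not dereference s_job->entity here: the entity may already
             * be gone, and the scheduler sets the pointer to NULL when the
             * job leaves the entity's queue.
             */
            drm_sched_job_cleanup(s_job);
            kfree(job);
    }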
#include <linux/kthread.h> diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c index 03c6becda795..b8be4c1db423 100644 --- a/drivers/gpu/drm/stm/ltdc.c +++ b/drivers/gpu/drm/stm/ltdc.c @@ -1145,7 +1145,7 @@ static void ltdc_crtc_disable_vblank(struct drm_crtc *crtc)
static int ltdc_crtc_set_crc_source(struct drm_crtc *crtc, const char *source) { - struct ltdc_device *ldev = crtc_to_ltdc(crtc); + struct ltdc_device *ldev; int ret;
DRM_DEBUG_DRIVER("\n"); @@ -1153,6 +1153,8 @@ static int ltdc_crtc_set_crc_source(struct drm_crtc *crtc, const char *source) if (!crtc) return -ENODEV;
+ ldev = crtc_to_ltdc(crtc); + if (source && strcmp(source, "auto") == 0) { ldev->crc_active = true; ret = regmap_set_bits(ldev->regmap, LTDC_GCR, GCR_CRCEN); diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index f7e06d433a91..dfe8e09a18de 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -4608,6 +4608,8 @@ static const struct hid_device_id hidpp_devices[] = { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC086) }, { /* Logitech G903 Hero Gaming Mouse over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC091) }, + { /* Logitech G915 TKL Keyboard over USB */ + HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC343) }, { /* Logitech G920 Wheel over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL), .driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS}, @@ -4630,6 +4632,8 @@ static const struct hid_device_id hidpp_devices[] = { { /* MX5500 keyboard over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b), .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS }, + { /* Logitech G915 TKL keyboard over Bluetooth */ + HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb35f) }, { /* M-RCQ142 V470 Cordless Laser Mouse over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb008) }, { /* MX Master mouse over Bluetooth */ diff --git a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c index 0060e3dcd775..db4639db9840 100644 --- a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c +++ b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c @@ -28,6 +28,7 @@ struct i2c_hid_of_goodix { struct regulator *vdd; struct regulator *vddio; struct gpio_desc *reset_gpio; + bool no_reset_during_suspend; const struct goodix_i2c_hid_timing_data *timings; };
@@ -37,6 +38,14 @@ static int goodix_i2c_hid_power_up(struct i2chid_ops *ops) container_of(ops, struct i2c_hid_of_goodix, ops); int ret;
+ /* + * We assert reset GPIO here (instead of during power-down) to ensure + * the device will have a clean state after powering up, just like the + * normal scenarios will have. + */ + if (ihid_goodix->no_reset_during_suspend) + gpiod_set_value_cansleep(ihid_goodix->reset_gpio, 1); + ret = regulator_enable(ihid_goodix->vdd); if (ret) return ret; @@ -60,7 +69,9 @@ static void goodix_i2c_hid_power_down(struct i2chid_ops *ops) struct i2c_hid_of_goodix *ihid_goodix = container_of(ops, struct i2c_hid_of_goodix, ops);
- gpiod_set_value_cansleep(ihid_goodix->reset_gpio, 1); + if (!ihid_goodix->no_reset_during_suspend) + gpiod_set_value_cansleep(ihid_goodix->reset_gpio, 1); + regulator_disable(ihid_goodix->vddio); regulator_disable(ihid_goodix->vdd); } @@ -91,6 +102,9 @@ static int i2c_hid_of_goodix_probe(struct i2c_client *client) if (IS_ERR(ihid_goodix->vddio)) return PTR_ERR(ihid_goodix->vddio);
+ ihid_goodix->no_reset_during_suspend = + of_property_read_bool(client->dev.of_node, "goodix,no-reset-during-suspend"); + ihid_goodix->timings = device_get_match_data(&client->dev);
return i2c_hid_core_probe(client, &ihid_goodix->ops, 0x0001, 0); diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h index fc108f19a64c..e99f3a3c65e1 100644 --- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h +++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h @@ -33,6 +33,7 @@ #define ADL_N_DEVICE_ID 0x54FC #define RPL_S_DEVICE_ID 0x7A78 #define MTL_P_DEVICE_ID 0x7E45 +#define ARL_H_DEVICE_ID 0x7745
#define REVISION_ID_CHT_A0 0x6 #define REVISION_ID_CHT_Ax_SI 0x0 diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c index 7120b30ac51d..55cb25038e63 100644 --- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c +++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c @@ -44,6 +44,7 @@ static const struct pci_device_id ish_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_N_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, RPL_S_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MTL_P_DEVICE_ID)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ARL_H_DEVICE_ID)}, {0, } }; MODULE_DEVICE_TABLE(pci, ish_pci_tbl); diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c index 85d8a6b04885..30a2a3200bed 100644 --- a/drivers/i2c/busses/i2c-bcm-iproc.c +++ b/drivers/i2c/busses/i2c-bcm-iproc.c @@ -233,13 +233,14 @@ static inline u32 iproc_i2c_rd_reg(struct bcm_iproc_i2c_dev *iproc_i2c, u32 offset) { u32 val; + unsigned long flags;
if (iproc_i2c->idm_base) { - spin_lock(&iproc_i2c->idm_lock); + spin_lock_irqsave(&iproc_i2c->idm_lock, flags); writel(iproc_i2c->ape_addr_mask, iproc_i2c->idm_base + IDM_CTRL_DIRECT_OFFSET); val = readl(iproc_i2c->base + offset); - spin_unlock(&iproc_i2c->idm_lock); + spin_unlock_irqrestore(&iproc_i2c->idm_lock, flags); } else { val = readl(iproc_i2c->base + offset); } @@ -250,12 +251,14 @@ static inline u32 iproc_i2c_rd_reg(struct bcm_iproc_i2c_dev *iproc_i2c, static inline void iproc_i2c_wr_reg(struct bcm_iproc_i2c_dev *iproc_i2c, u32 offset, u32 val) { + unsigned long flags; + if (iproc_i2c->idm_base) { - spin_lock(&iproc_i2c->idm_lock); + spin_lock_irqsave(&iproc_i2c->idm_lock, flags); writel(iproc_i2c->ape_addr_mask, iproc_i2c->idm_base + IDM_CTRL_DIRECT_OFFSET); writel(val, iproc_i2c->base + offset); - spin_unlock(&iproc_i2c->idm_lock); + spin_unlock_irqrestore(&iproc_i2c->idm_lock, flags); } else { writel(val, iproc_i2c->base + offset); } diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index 55ea91a63382..c51fc1f4b97e 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -526,9 +526,21 @@ i2c_dw_read(struct dw_i2c_dev *dev) u32 flags = msgs[dev->msg_read_idx].flags;
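On the i2c-bcm-iproc.c hunks above: switching from spin_lock() to spin_lock_irqsave() is the standard fix when a lock can also be taken from interrupt context; if an interrupt fires on the CPU that already holds the lock and the handler tries to take it again, the CPU deadlocks. A minimal sketch of the convention (touch_shared and my_irq are placeholder names):

    #include <linux/interrupt.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(lock);

    /* process context: disable local IRQs while holding the lock */
    static void touch_shared(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&lock, flags);
            /* ... access shared state ... */
            spin_unlock_irqrestore(&lock, flags);
    }

    /* hardirq context: interrupts are already disabled here */
    static irqreturn_t my_irq(int irq, void *data)
    {
            spin_lock(&lock);
            /* ... access shared state ... */
            spin_unlock(&lock);
            return IRQ_HANDLED;
    }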
regmap_read(dev->map, DW_IC_DATA_CMD, &tmp); + tmp &= DW_IC_DATA_CMD_DAT; /* Ensure length byte is a valid value */ - if (flags & I2C_M_RECV_LEN && - (tmp & DW_IC_DATA_CMD_DAT) <= I2C_SMBUS_BLOCK_MAX && tmp > 0) { + if (flags & I2C_M_RECV_LEN) { + /* + * if IC_EMPTYFIFO_HOLD_MASTER_EN is set, which cannot be + * detected from the registers, the controller can be + * disabled if the STOP bit is set. But it is only set + * after receiving block data response length in + * I2C_FUNC_SMBUS_BLOCK_DATA case. That needs to read + * another byte with STOP bit set when the block data + * response length is invalid to complete the transaction. + */ + if (!tmp || tmp > I2C_SMBUS_BLOCK_MAX) + tmp = 1; + len = i2c_dw_recv_len(dev, tmp); } *buf++ = tmp; diff --git a/drivers/i2c/busses/i2c-hisi.c b/drivers/i2c/busses/i2c-hisi.c index e067671b3ce2..0980c773cb5b 100644 --- a/drivers/i2c/busses/i2c-hisi.c +++ b/drivers/i2c/busses/i2c-hisi.c @@ -330,6 +330,14 @@ static irqreturn_t hisi_i2c_irq(int irq, void *context) struct hisi_i2c_controller *ctlr = context; u32 int_stat;
+ /* + * Don't handle the interrupt if cltr->completion is NULL. We may + * reach here because the interrupt is spurious or the transfer is + * started by another port (e.g. firmware) rather than us. + */ + if (!ctlr->completion) + return IRQ_NONE; + int_stat = readl(ctlr->iobase + HISI_I2C_INT_MSTAT); hisi_i2c_clear_int(ctlr, int_stat); if (!(int_stat & HISI_I2C_INT_ALL)) diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 157066f06a32..d561cf066d70 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -449,7 +449,7 @@ static int tegra_i2c_init_dma(struct tegra_i2c_dev *i2c_dev) if (i2c_dev->is_vi) return 0;
- if (!i2c_dev->hw->has_apb_dma) { + if (i2c_dev->hw->has_apb_dma) { if (!IS_ENABLED(CONFIG_TEGRA20_APB_DMA)) { dev_dbg(i2c_dev->dev, "APB DMA support not enabled\n"); return 0; diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index 2c95e6f3d47a..eef3ef3fabb4 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h @@ -179,6 +179,8 @@ struct bnxt_re_dev { #define BNXT_RE_ROCEV2_IPV4_PACKET 2 #define BNXT_RE_ROCEV2_IPV6_PACKET 3
+#define BNXT_RE_CHECK_RC(x) ((x) && ((x) != -ETIMEDOUT)) + static inline struct device *rdev_to_dev(struct bnxt_re_dev *rdev) { if (rdev) diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index ebe6852c40e8..e7f153ee2754 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -614,12 +614,20 @@ int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags) { struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah); struct bnxt_re_dev *rdev = ah->rdev; + bool block = true; + int rc = 0;
- bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, - !(flags & RDMA_DESTROY_AH_SLEEPABLE)); + block = !(flags & RDMA_DESTROY_AH_SLEEPABLE); + rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, block); + if (BNXT_RE_CHECK_RC(rc)) { + if (rc == -ETIMEDOUT) + rc = 0; + else + goto fail; + } atomic_dec(&rdev->ah_count); - - return 0; +fail: + return rc; }
static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype) diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index b967a17a44be..10919532bca2 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -468,13 +468,14 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah, return 0; }
-void bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah, - bool block) +int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah, + bool block) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct creq_destroy_ah_resp resp = {}; struct bnxt_qplib_cmdqmsg msg = {}; struct cmdq_destroy_ah req = {}; + int rc;
/* Clean up the AH table in the device */ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req, @@ -485,7 +486,8 @@ void bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), block); - bnxt_qplib_rcfw_send_message(rcfw, &msg); + rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); + return rc; }
/* MRW */ diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h index 5de874659cdf..4061616048e8 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h @@ -327,8 +327,8 @@ int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res, struct bnxt_qplib_ctx *ctx); int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah, bool block); -void bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah, - bool block); +int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah, + bool block); int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw); int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw, diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c index 54b61930a7fd..4b3b5b274e84 100644 --- a/drivers/infiniband/hw/mana/qp.c +++ b/drivers/infiniband/hw/mana/qp.c @@ -13,7 +13,7 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev, u8 *rx_hash_key) { struct mana_port_context *mpc = netdev_priv(ndev); - struct mana_cfg_rx_steer_req *req = NULL; + struct mana_cfg_rx_steer_req_v2 *req; struct mana_cfg_rx_steer_resp resp = {}; mana_handle_t *req_indir_tab; struct gdma_context *gc; @@ -33,6 +33,8 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev, mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size, sizeof(resp));
+ req->hdr.req.msg_version = GDMA_MESSAGE_V2; + req->vport = mpc->port_handle; req->rx_enable = 1; req->update_default_rxobj = 1; @@ -46,6 +48,7 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev, req->num_indir_entries = MANA_INDIRECT_TABLE_SIZE; req->indir_tab_offset = sizeof(*req); req->update_indir_tab = true; + req->cqe_coalescing_enable = 1;
req_indir_tab = (mana_handle_t *)(req + 1); /* The ind table passed to the hardware must have diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c index bae0334d6e7f..aec011557b4a 100644 --- a/drivers/infiniband/hw/mlx5/qpc.c +++ b/drivers/infiniband/hw/mlx5/qpc.c @@ -298,8 +298,7 @@ int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp) MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP); MLX5_SET(destroy_qp_in, in, qpn, qp->qpn); MLX5_SET(destroy_qp_in, in, uid, qp->uid); - mlx5_cmd_exec_in(dev->mdev, destroy_qp, in); - return 0; + return mlx5_cmd_exec_in(dev->mdev, destroy_qp, in); }
int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev, @@ -551,14 +550,14 @@ int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn) return mlx5_cmd_exec_in(dev->mdev, dealloc_xrcd, in); }
-static void destroy_rq_tracked(struct mlx5_ib_dev *dev, u32 rqn, u16 uid) +static int destroy_rq_tracked(struct mlx5_ib_dev *dev, u32 rqn, u16 uid) { u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {};
MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ); MLX5_SET(destroy_rq_in, in, rqn, rqn); MLX5_SET(destroy_rq_in, in, uid, uid); - mlx5_cmd_exec_in(dev->mdev, destroy_rq, in); + return mlx5_cmd_exec_in(dev->mdev, destroy_rq, in); }
int mlx5_core_create_rq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen, @@ -589,8 +588,7 @@ int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev, struct mlx5_core_qp *rq) { destroy_resource_common(dev, rq); - destroy_rq_tracked(dev, rq->qpn, rq->uid); - return 0; + return destroy_rq_tracked(dev, rq->qpn, rq->uid); }
static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid) diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h index 2ddbda3a4374..5a224e244be8 100644 --- a/drivers/iommu/amd/amd_iommu_types.h +++ b/drivers/iommu/amd/amd_iommu_types.h @@ -174,6 +174,7 @@ #define CONTROL_GAINT_EN 29 #define CONTROL_XT_EN 50 #define CONTROL_INTCAPXT_EN 51 +#define CONTROL_IRTCACHEDIS 59 #define CONTROL_SNPAVIC_EN 61
#define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT) @@ -716,6 +717,9 @@ struct amd_iommu { /* if one, we need to send a completion wait command */ bool need_sync;
+ /* true if disable irte caching */ + bool irtcachedis_enabled; + /* Handle for IOMMU core code */ struct iommu_device iommu;
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index c2d80a4e5fb0..02846299af0e 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -162,6 +162,7 @@ static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE; static bool amd_iommu_detected; static bool amd_iommu_disabled __initdata; static bool amd_iommu_force_enable __initdata; +static bool amd_iommu_irtcachedis; static int amd_iommu_target_ivhd_type;
/* Global EFR and EFR2 registers */ @@ -484,6 +485,9 @@ static void iommu_disable(struct amd_iommu *iommu)
/* Disable IOMMU hardware itself */ iommu_feature_disable(iommu, CONTROL_IOMMU_EN); + + /* Clear IRTE cache disabling bit */ + iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS); }
/* @@ -2710,6 +2714,33 @@ static void iommu_enable_ga(struct amd_iommu *iommu) #endif }
+static void iommu_disable_irtcachedis(struct amd_iommu *iommu) +{ + iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS); +} + +static void iommu_enable_irtcachedis(struct amd_iommu *iommu) +{ + u64 ctrl; + + if (!amd_iommu_irtcachedis) + return; + + /* + * Note: + * The support for IRTCacheDis feature is dertermined by + * checking if the bit is writable. + */ + iommu_feature_enable(iommu, CONTROL_IRTCACHEDIS); + ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); + ctrl &= (1ULL << CONTROL_IRTCACHEDIS); + if (ctrl) + iommu->irtcachedis_enabled = true; + pr_info("iommu%d (%#06x) : IRT cache is %s\n", + iommu->index, iommu->devid, + iommu->irtcachedis_enabled ? "disabled" : "enabled"); +} + static void early_enable_iommu(struct amd_iommu *iommu) { iommu_disable(iommu); @@ -2720,6 +2751,7 @@ static void early_enable_iommu(struct amd_iommu *iommu) iommu_set_exclusion_range(iommu); iommu_enable_ga(iommu); iommu_enable_xt(iommu); + iommu_enable_irtcachedis(iommu); iommu_enable(iommu); iommu_flush_all_caches(iommu); } @@ -2770,10 +2802,12 @@ static void early_enable_iommus(void) for_each_iommu(iommu) { iommu_disable_command_buffer(iommu); iommu_disable_event_buffer(iommu); + iommu_disable_irtcachedis(iommu); iommu_enable_command_buffer(iommu); iommu_enable_event_buffer(iommu); iommu_enable_ga(iommu); iommu_enable_xt(iommu); + iommu_enable_irtcachedis(iommu); iommu_set_device_table(iommu); iommu_flush_all_caches(iommu); } @@ -3426,6 +3460,8 @@ static int __init parse_amd_iommu_options(char *str) amd_iommu_pgtable = AMD_IOMMU_V1; } else if (strncmp(str, "pgtbl_v2", 8) == 0) { amd_iommu_pgtable = AMD_IOMMU_V2; + } else if (strncmp(str, "irtcachedis", 11) == 0) { + amd_iommu_irtcachedis = true; } else { pr_notice("Unknown option - '%s'\n", str); } diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c index 1c849814a491..212df2e3d350 100644 --- a/drivers/leds/rgb/leds-qcom-lpg.c +++ b/drivers/leds/rgb/leds-qcom-lpg.c @@ -1173,8 +1173,10 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np) i = 0; for_each_available_child_of_node(np, child) { ret = lpg_parse_channel(lpg, child, &led->channels[i]); - if (ret < 0) + if (ret < 0) { + of_node_put(child); return ret; + }
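Notes on the amd/init.c change above: disabling the IRTE cache is opt-in via the amd_iommu=irtcachedis kernel parameter, and because there is no dedicated capability flag, support is probed by setting CONTROL_IRTCACHEDIS and reading the control register back to see whether the bit stuck. A hedged sketch of that write-then-read-back probe, with made-up names (EXAMPLE_CTRL_OFFSET and EXAMPLE_FEATURE_BIT are placeholders, not AMD IOMMU definitions):

    #include <linux/bits.h>
    #include <linux/io.h>
    #include <linux/types.h>

    #define EXAMPLE_CTRL_OFFSET   0x0018    /* placeholder MMIO offset */
    #define EXAMPLE_FEATURE_BIT   59        /* placeholder bit number */

    static bool probe_feature_bit(void __iomem *mmio_base)
    {
            u64 ctrl;

            /* try to set the bit ... */
            ctrl = readq(mmio_base + EXAMPLE_CTRL_OFFSET);
            writeq(ctrl | BIT_ULL(EXAMPLE_FEATURE_BIT), mmio_base + EXAMPLE_CTRL_OFFSET);

            /* ... and treat the feature as present only if it sticks */
            ctrl = readq(mmio_base + EXAMPLE_CTRL_OFFSET);
            return !!(ctrl & BIT_ULL(EXAMPLE_FEATURE_BIT));
    }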
info[i].color_index = led->channels[i]->color; info[i].intensity = 0; @@ -1352,8 +1354,10 @@ static int lpg_probe(struct platform_device *pdev)
for_each_available_child_of_node(pdev->dev.of_node, np) { ret = lpg_add_led(lpg, np); - if (ret) + if (ret) { + of_node_put(np); return ret; + } }
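Both leds-qcom-lpg.c hunks above fix the same leak class: for_each_available_child_of_node() holds a reference on the child node it is currently visiting, so any early return out of the loop has to drop that reference explicitly. A minimal sketch of the pattern (parse_one() is a placeholder):

    #include <linux/of.h>

    int parse_one(struct device_node *child);       /* placeholder */

    static int parse_children(struct device_node *parent)
    {
            struct device_node *child;
            int ret;

            for_each_available_child_of_node(parent, child) {
                    ret = parse_one(child);
                    if (ret) {
                            of_node_put(child);     /* drop the iterator's reference */
                            return ret;
                    }
            }

            return 0;
    }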
for (i = 0; i < lpg->num_channels; i++) diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c index 40cb3cb87ba1..60425c99a2b8 100644 --- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c +++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c @@ -1310,6 +1310,8 @@ static int mtk_jpeg_probe(struct platform_device *pdev) jpeg->dev = &pdev->dev; jpeg->variant = of_device_get_match_data(jpeg->dev);
+ platform_set_drvdata(pdev, jpeg); + ret = devm_of_platform_populate(&pdev->dev); if (ret) { v4l2_err(&jpeg->v4l2_dev, "Master of platform populate failed."); @@ -1381,8 +1383,6 @@ static int mtk_jpeg_probe(struct platform_device *pdev) jpeg->variant->dev_name, jpeg->vdev->num, VIDEO_MAJOR, jpeg->vdev->minor);
- platform_set_drvdata(pdev, jpeg); - pm_runtime_enable(&pdev->dev);
return 0; diff --git a/drivers/media/platform/mediatek/vpu/mtk_vpu.c b/drivers/media/platform/mediatek/vpu/mtk_vpu.c index 5e2bc286f168..1a95958a1f90 100644 --- a/drivers/media/platform/mediatek/vpu/mtk_vpu.c +++ b/drivers/media/platform/mediatek/vpu/mtk_vpu.c @@ -562,15 +562,17 @@ static int load_requested_vpu(struct mtk_vpu *vpu, int vpu_load_firmware(struct platform_device *pdev) { struct mtk_vpu *vpu; - struct device *dev = &pdev->dev; + struct device *dev; struct vpu_run *run; int ret;
if (!pdev) { - dev_err(dev, "VPU platform device is invalid\n"); + pr_err("VPU platform device is invalid\n"); return -EINVAL; }
+ dev = &pdev->dev; + vpu = platform_get_drvdata(pdev); run = &vpu->run;
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c index e0832f3f4f25..06c95568e5af 100644 --- a/drivers/media/platform/qcom/camss/camss-vfe.c +++ b/drivers/media/platform/qcom/camss/camss-vfe.c @@ -1541,7 +1541,11 @@ int msm_vfe_register_entities(struct vfe_device *vfe, }
video_out->ops = &vfe->video_ops; - video_out->bpl_alignment = 8; + if (vfe->camss->version == CAMSS_845 || + vfe->camss->version == CAMSS_8250) + video_out->bpl_alignment = 16; + else + video_out->bpl_alignment = 8; video_out->line_based = 0; if (i == VFE_LINE_PIX) { video_out->bpl_alignment = 16; diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c index 35453f81c1d9..c06f8ca9e09e 100644 --- a/drivers/media/usb/uvc/uvc_v4l2.c +++ b/drivers/media/usb/uvc/uvc_v4l2.c @@ -45,7 +45,7 @@ static int uvc_control_add_xu_mapping(struct uvc_video_chain *chain, map->menu_names = NULL; map->menu_mapping = NULL;
- map->menu_mask = BIT_MASK(xmap->menu_count); + map->menu_mask = GENMASK(xmap->menu_count - 1, 0);
size = xmap->menu_count * sizeof(*map->menu_mapping); map->menu_mapping = kzalloc(size, GFP_KERNEL); diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index e46330815484..5d6c16adb50d 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -2097,14 +2097,14 @@ static void mmc_blk_mq_poll_completion(struct mmc_queue *mq, mmc_blk_urgent_bkops(mq, mqrq); }
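The one-line uvc_v4l2.c fix above is easy to misread: BIT_MASK(n) expands to a single bit (bit n modulo BITS_PER_LONG), whereas the intent was a mask covering all n menu entries, which is what GENMASK(n - 1, 0) yields. Roughly, for a four-entry menu:

    #include <linux/bits.h>

    /* menu_count == 4 */
    static unsigned long old_mask(void) { return BIT_MASK(4); }        /* 0x10: only bit 4 set */
    static unsigned long new_mask(void) { return GENMASK(4 - 1, 0); }  /* 0x0f: bits 0..3 set */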
-static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req) +static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, enum mmc_issue_type issue_type) { unsigned long flags; bool put_card;
spin_lock_irqsave(&mq->lock, flags);
- mq->in_flight[mmc_issue_type(mq, req)] -= 1; + mq->in_flight[issue_type] -= 1;
put_card = (mmc_tot_in_flight(mq) == 0);
@@ -2117,6 +2117,7 @@ static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req) static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req, bool can_sleep) { + enum mmc_issue_type issue_type = mmc_issue_type(mq, req); struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); struct mmc_request *mrq = &mqrq->brq.mrq; struct mmc_host *host = mq->card->host; @@ -2136,7 +2137,7 @@ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req, blk_mq_complete_request(req); }
- mmc_blk_mq_dec_in_flight(mq, req); + mmc_blk_mq_dec_in_flight(mq, issue_type); }
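The mmc/core/block.c change above captures the issue type while the request is still guaranteed to be valid: mmc_blk_mq_post_req() may complete the request, handing it back to the block layer, after which req should not be dereferenced to work out which in-flight counter to decrement. The generic shape of that fix, with placeholder names (struct obj, hand_off, do_accounting):

    struct obj { int key; };

    void hand_off(struct obj *o);       /* placeholder: may free or recycle o */
    void do_accounting(int key);        /* placeholder */

    static void finish(struct obj *o)
    {
            int key = o->key;           /* capture while o is still ours */

            hand_off(o);                /* o may be gone from here on */
            do_accounting(key);         /* uses the cached value, not o */
    }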
void mmc_blk_mq_recovery(struct mmc_queue *mq) diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c index b01ffb4d0973..3215063bcf86 100644 --- a/drivers/mmc/host/sdhci_f_sdh30.c +++ b/drivers/mmc/host/sdhci_f_sdh30.c @@ -210,13 +210,16 @@ static int sdhci_f_sdh30_remove(struct platform_device *pdev) { struct sdhci_host *host = platform_get_drvdata(pdev); struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host); - - reset_control_assert(priv->rst); - clk_disable_unprepare(priv->clk); - clk_disable_unprepare(priv->clk_iface); + struct clk *clk_iface = priv->clk_iface; + struct reset_control *rst = priv->rst; + struct clk *clk = priv->clk;
sdhci_pltfm_unregister(pdev);
+ reset_control_assert(rst); + clk_disable_unprepare(clk); + clk_disable_unprepare(clk_iface); + return 0; }
diff --git a/drivers/mmc/host/sunplus-mmc.c b/drivers/mmc/host/sunplus-mmc.c index db5e0dcdfa7f..2bdebeb1f8e4 100644 --- a/drivers/mmc/host/sunplus-mmc.c +++ b/drivers/mmc/host/sunplus-mmc.c @@ -863,11 +863,9 @@ static int spmmc_drv_probe(struct platform_device *pdev) struct spmmc_host *host; int ret = 0;
- mmc = mmc_alloc_host(sizeof(*host), &pdev->dev); - if (!mmc) { - ret = -ENOMEM; - goto probe_free_host; - } + mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(struct spmmc_host)); + if (!mmc) + return -ENOMEM;
host = mmc_priv(mmc); host->mmc = mmc; @@ -902,7 +900,7 @@ static int spmmc_drv_probe(struct platform_device *pdev)
ret = mmc_of_parse(mmc); if (ret) - goto probe_free_host; + goto clk_disable;
mmc->ops = &spmmc_ops; mmc->f_min = SPMMC_MIN_CLK; @@ -911,7 +909,7 @@ static int spmmc_drv_probe(struct platform_device *pdev)
ret = mmc_regulator_get_supply(mmc); if (ret) - goto probe_free_host; + goto clk_disable;
if (!mmc->ocr_avail) mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; @@ -927,14 +925,17 @@ static int spmmc_drv_probe(struct platform_device *pdev) host->tuning_info.enable_tuning = 1; pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (ret) + goto pm_disable;
- return ret; + return 0;
-probe_free_host: - if (mmc) - mmc_free_host(mmc); +pm_disable: + pm_runtime_disable(&pdev->dev);
+clk_disable: + clk_disable_unprepare(host->clk); return ret; }
@@ -948,7 +949,6 @@ static int spmmc_drv_remove(struct platform_device *dev) pm_runtime_put_noidle(&dev->dev); pm_runtime_disable(&dev->dev); platform_set_drvdata(dev, NULL); - mmc_free_host(host->mmc);
return 0; } diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c index 521af9251f33..bf2a92fba0ed 100644 --- a/drivers/mmc/host/wbsd.c +++ b/drivers/mmc/host/wbsd.c @@ -1705,8 +1705,6 @@ static int wbsd_init(struct device *dev, int base, int irq, int dma,
wbsd_release_resources(host); wbsd_free_mmc(dev); - - mmc_free_host(mmc); return ret; }
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 642e93e8623e..8c9d05a1fe66 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3006,6 +3006,14 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
/* If there is a GPIO connected to the reset pin, toggle it */ if (gpiod) { + /* If the switch has just been reset and not yet completed + * loading EEPROM, the reset may interrupt the I2C transaction + * mid-byte, causing the first EEPROM read after the reset + * from the wrong location resulting in the switch booting + * to wrong mode and inoperable. + */ + mv88e6xxx_g1_wait_eeprom_done(chip); + gpiod_set_value_cansleep(gpiod, 1); usleep_range(10000, 20000); gpiod_set_value_cansleep(gpiod, 0); diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 29a1199dad14..3fbe15b3ac62 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -5159,6 +5159,9 @@ static int __maybe_unused macb_suspend(struct device *dev) unsigned int q; int err;
+ if (!device_may_wakeup(&bp->dev->dev)) + phy_exit(bp->sgmii_phy); + if (!netif_running(netdev)) return 0;
@@ -5219,7 +5222,6 @@ static int __maybe_unused macb_suspend(struct device *dev) if (!(bp->wol & MACB_WOL_ENABLED)) { rtnl_lock(); phylink_stop(bp->phylink); - phy_exit(bp->sgmii_phy); rtnl_unlock(); spin_lock_irqsave(&bp->lock, flags); macb_reset_hw(bp); @@ -5249,6 +5251,9 @@ static int __maybe_unused macb_resume(struct device *dev) unsigned int q; int err;
+ if (!device_may_wakeup(&bp->dev->dev)) + phy_init(bp->sgmii_phy); + if (!netif_running(netdev)) return 0;
@@ -5309,8 +5314,6 @@ static int __maybe_unused macb_resume(struct device *dev) macb_set_rx_mode(netdev); macb_restore_features(bp); rtnl_lock(); - if (!device_may_wakeup(&bp->dev->dev)) - phy_init(bp->sgmii_phy);
phylink_start(bp->phylink); rtnl_unlock(); diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 9da0c87f0328..f99c1f7fec40 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -210,11 +210,11 @@ static int i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, * @hw: pointer to the HW structure. * @module_pointer: module pointer location in words from the NVM beginning * @offset: offset in words from module start - * @words: number of words to write - * @data: buffer with words to write to the Shadow RAM + * @words: number of words to read + * @data: buffer with words to read to the Shadow RAM * @last_command: tells the AdminQ that this is the last command * - * Writes a 16 bit words buffer to the Shadow RAM using the admin command. + * Reads a 16 bit words buffer to the Shadow RAM using the admin command. **/ static int i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer, u32 offset, @@ -234,18 +234,18 @@ static int i40e_read_nvm_aq(struct i40e_hw *hw, */ if ((offset + words) > hw->nvm.sr_size) i40e_debug(hw, I40E_DEBUG_NVM, - "NVM write error: offset %d beyond Shadow RAM limit %d\n", + "NVM read error: offset %d beyond Shadow RAM limit %d\n", (offset + words), hw->nvm.sr_size); else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) - /* We can write only up to 4KB (one sector), in one AQ write */ + /* We can read only up to 4KB (one sector), in one AQ write */ i40e_debug(hw, I40E_DEBUG_NVM, - "NVM write fail error: tried to write %d words, limit is %d.\n", + "NVM read fail error: tried to read %d words, limit is %d.\n", words, I40E_SR_SECTOR_SIZE_IN_WORDS); else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) - /* A single write cannot spread over two sectors */ + /* A single read cannot spread over two sectors */ i40e_debug(hw, I40E_DEBUG_NVM, - "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n", + "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n", offset, words); else ret_code = i40e_aq_read_nvm(hw, module_pointer, diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index 460ca561819a..a34303ad057d 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -1289,6 +1289,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe fltr->ip_mask.src_port = fsp->m_u.tcp_ip4_spec.psrc; fltr->ip_mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst; fltr->ip_mask.tos = fsp->m_u.tcp_ip4_spec.tos; + fltr->ip_ver = 4; break; case AH_V4_FLOW: case ESP_V4_FLOW: @@ -1300,6 +1301,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.ah_ip4_spec.ip4dst; fltr->ip_mask.spi = fsp->m_u.ah_ip4_spec.spi; fltr->ip_mask.tos = fsp->m_u.ah_ip4_spec.tos; + fltr->ip_ver = 4; break; case IPV4_USER_FLOW: fltr->ip_data.v4_addrs.src_ip = fsp->h_u.usr_ip4_spec.ip4src; @@ -1312,6 +1314,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe fltr->ip_mask.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes; fltr->ip_mask.tos = fsp->m_u.usr_ip4_spec.tos; fltr->ip_mask.proto = fsp->m_u.usr_ip4_spec.proto; + fltr->ip_ver = 4; break; case TCP_V6_FLOW: case UDP_V6_FLOW: @@ -1330,6 +1333,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct 
ethtool_rx_flow_spe fltr->ip_mask.src_port = fsp->m_u.tcp_ip6_spec.psrc; fltr->ip_mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst; fltr->ip_mask.tclass = fsp->m_u.tcp_ip6_spec.tclass; + fltr->ip_ver = 6; break; case AH_V6_FLOW: case ESP_V6_FLOW: @@ -1345,6 +1349,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe sizeof(struct in6_addr)); fltr->ip_mask.spi = fsp->m_u.ah_ip6_spec.spi; fltr->ip_mask.tclass = fsp->m_u.ah_ip6_spec.tclass; + fltr->ip_ver = 6; break; case IPV6_USER_FLOW: memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src, @@ -1361,6 +1366,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe fltr->ip_mask.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes; fltr->ip_mask.tclass = fsp->m_u.usr_ip6_spec.tclass; fltr->ip_mask.proto = fsp->m_u.usr_ip6_spec.l4_proto; + fltr->ip_ver = 6; break; case ETHER_FLOW: fltr->eth_data.etype = fsp->h_u.ether_spec.h_proto; @@ -1371,6 +1377,10 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe return -EINVAL; }
+ err = iavf_validate_fdir_fltr_masks(adapter, fltr); + if (err) + return err; + if (iavf_fdir_is_dup_fltr(adapter, fltr)) return -EEXIST;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c index 505e82ebafe4..03e774bd2a5b 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_fdir.c +++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c @@ -18,6 +18,79 @@ static const struct in6_addr ipv6_addr_full_mask = { } };
+static const struct in6_addr ipv6_addr_zero_mask = { + .in6_u = { + .u6_addr8 = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + } + } +}; + +/** + * iavf_validate_fdir_fltr_masks - validate Flow Director filter fields masks + * @adapter: pointer to the VF adapter structure + * @fltr: Flow Director filter data structure + * + * Returns 0 if all masks of packet fields are either full or empty. Returns + * error on at least one partial mask. + */ +int iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter, + struct iavf_fdir_fltr *fltr) +{ + if (fltr->eth_mask.etype && fltr->eth_mask.etype != htons(U16_MAX)) + goto partial_mask; + + if (fltr->ip_ver == 4) { + if (fltr->ip_mask.v4_addrs.src_ip && + fltr->ip_mask.v4_addrs.src_ip != htonl(U32_MAX)) + goto partial_mask; + + if (fltr->ip_mask.v4_addrs.dst_ip && + fltr->ip_mask.v4_addrs.dst_ip != htonl(U32_MAX)) + goto partial_mask; + + if (fltr->ip_mask.tos && fltr->ip_mask.tos != U8_MAX) + goto partial_mask; + } else if (fltr->ip_ver == 6) { + if (memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_zero_mask, + sizeof(struct in6_addr)) && + memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_full_mask, + sizeof(struct in6_addr))) + goto partial_mask; + + if (memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_zero_mask, + sizeof(struct in6_addr)) && + memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_full_mask, + sizeof(struct in6_addr))) + goto partial_mask; + + if (fltr->ip_mask.tclass && fltr->ip_mask.tclass != U8_MAX) + goto partial_mask; + } + + if (fltr->ip_mask.proto && fltr->ip_mask.proto != U8_MAX) + goto partial_mask; + + if (fltr->ip_mask.src_port && fltr->ip_mask.src_port != htons(U16_MAX)) + goto partial_mask; + + if (fltr->ip_mask.dst_port && fltr->ip_mask.dst_port != htons(U16_MAX)) + goto partial_mask; + + if (fltr->ip_mask.spi && fltr->ip_mask.spi != htonl(U32_MAX)) + goto partial_mask; + + if (fltr->ip_mask.l4_header && + fltr->ip_mask.l4_header != htonl(U32_MAX)) + goto partial_mask; + + return 0; + +partial_mask: + dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, partial masks are not supported\n"); + return -EOPNOTSUPP; +} + /** * iavf_pkt_udp_no_pay_len - the length of UDP packet without payload * @fltr: Flow Director filter data structure @@ -263,8 +336,6 @@ iavf_fill_fdir_ip4_hdr(struct iavf_fdir_fltr *fltr, VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST); }
- fltr->ip_ver = 4; - return 0; }
@@ -309,8 +380,6 @@ iavf_fill_fdir_ip6_hdr(struct iavf_fdir_fltr *fltr, VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST); }
- fltr->ip_ver = 6; - return 0; }
diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h index 33c55c366315..9eb9f73f6adf 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_fdir.h +++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h @@ -110,6 +110,8 @@ struct iavf_fdir_fltr { struct virtchnl_fdir_add vc_add_msg; };
+int iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter, + struct iavf_fdir_fltr *fltr); int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c index f6dd3f8fd936..03e513984946 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -568,6 +568,12 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode, break; case DEVLINK_ESWITCH_MODE_SWITCHDEV: { + if (ice_is_adq_active(pf)) { + dev_err(ice_pf_to_dev(pf), "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root"); + NL_SET_ERR_MSG_MOD(extack, "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root"); + return -EOPNOTSUPP; + } + dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev", pf->hw.pf_id); NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev"); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 34e8e7cb1bc5..cfb76612bd2f 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -9065,6 +9065,11 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type, ice_setup_tc_block_cb, np, np, true); case TC_SETUP_QDISC_MQPRIO: + if (ice_is_eswitch_mode_switchdev(pf)) { + netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n"); + return -EOPNOTSUPP; + } + if (pf->adev) { mutex_lock(&pf->adev_mutex); device_lock(&pf->adev->dev); diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c index 1cc6af2feb38..565320ec24f8 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c @@ -55,7 +55,7 @@ static int octep_send_mbox_req(struct octep_device *oct, list_add_tail(&d->list, &oct->ctrl_req_wait_list); ret = wait_event_interruptible_timeout(oct->ctrl_req_wait_q, (d->done != 0), - jiffies + msecs_to_jiffies(500)); + msecs_to_jiffies(500)); list_del(&d->list); if (ret == 0 || ret == 1) return -EAGAIN; diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index 43eb6e871351..4424de2ffd70 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -1038,6 +1038,10 @@ static void octep_device_cleanup(struct octep_device *oct) { int i;
+ oct->poll_non_ioq_intr = false; + cancel_delayed_work_sync(&oct->intr_poll_task); + cancel_work_sync(&oct->ctrl_mbox_task); + dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n");
for (i = 0; i < OCTEP_MAX_VF; i++) { @@ -1200,14 +1204,11 @@ static void octep_remove(struct pci_dev *pdev) if (!oct) return;
- cancel_work_sync(&oct->tx_timeout_task); - cancel_work_sync(&oct->ctrl_mbox_task); netdev = oct->netdev; if (netdev->reg_state == NETREG_REGISTERED) unregister_netdev(netdev);
- oct->poll_non_ioq_intr = false; - cancel_delayed_work_sync(&oct->intr_poll_task); + cancel_work_sync(&oct->tx_timeout_task); octep_device_cleanup(oct); pci_release_mem_regions(pdev); free_netdev(netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index 9e8e6184f9e4..ecfe93a479da 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -84,6 +84,8 @@ enum mlx5e_xdp_xmit_mode { * MLX5E_XDP_XMIT_MODE_XSK: * none. */ +#define MLX5E_XDP_FIFO_ENTRIES2DS_MAX_RATIO 4 + union mlx5e_xdp_info { enum mlx5e_xdp_xmit_mode mode; union { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 7e6d0489854e..975c82df345c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1298,11 +1298,13 @@ static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa) { struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo; int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); - int entries = wq_sz * MLX5_SEND_WQEBB_NUM_DS * 2; /* upper bound for maximum num of - * entries of all xmit_modes. - */ + int entries; size_t size;
+ /* upper bound for maximum num of entries of all xmit_modes. */ + entries = roundup_pow_of_two(wq_sz * MLX5_SEND_WQEBB_NUM_DS * + MLX5E_XDP_FIFO_ENTRIES2DS_MAX_RATIO); + size = array_size(sizeof(*xdpi_fifo->xi), entries); xdpi_fifo->xi = kvzalloc_node(size, GFP_KERNEL, numa); if (!xdpi_fifo->xi) diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 96c78f7db254..7441577294ba 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -973,7 +973,7 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc, bool update_tab) { u16 num_entries = MANA_INDIRECT_TABLE_SIZE; - struct mana_cfg_rx_steer_req *req = NULL; + struct mana_cfg_rx_steer_req_v2 *req; struct mana_cfg_rx_steer_resp resp = {}; struct net_device *ndev = apc->ndev; mana_handle_t *req_indir_tab; @@ -988,6 +988,8 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc, mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size, sizeof(resp));
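On the mlx5e en_main.c hunk above: besides raising the bound through the new MLX5E_XDP_FIFO_ENTRIES2DS_MAX_RATIO, the entry count is now rounded up to a power of two, presumably so the FIFO can keep free-running producer/consumer counters and index its slots with a cheap mask rather than a modulo. A hedged sketch of that ring convention (struct fifo here is illustrative, not the mlx5e type):

    #include <linux/log2.h>
    #include <linux/types.h>

    struct fifo {
            void **slots;
            u32 mask;       /* entries - 1, with entries a power of two */
            u32 pc, cc;     /* free-running producer/consumer counters */
    };

    static void fifo_init(struct fifo *f, void **slots, u32 want)
    {
            f->slots = slots;
            f->mask = roundup_pow_of_two(want) - 1;
            f->pc = 0;
            f->cc = 0;
    }

    static void fifo_push(struct fifo *f, void *item)
    {
            f->slots[f->pc++ & f->mask] = item;
    }

    static void *fifo_pop(struct fifo *f)
    {
            return f->slots[f->cc++ & f->mask];
    }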
+ req->hdr.req.msg_version = GDMA_MESSAGE_V2; + req->vport = apc->port_handle; req->num_indir_entries = num_entries; req->indir_tab_offset = sizeof(*req); @@ -997,6 +999,7 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc, req->update_hashkey = update_key; req->update_indir_tab = update_tab; req->default_rxobj = apc->default_rxobj; + req->cqe_coalescing_enable = 0;
if (update_key) memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE); diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 4b004a728190..99df00c30b8c 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -176,6 +176,15 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param) } #endif
+static int __maybe_unused qede_suspend(struct device *dev) +{ + dev_info(dev, "Device does not support suspend operation\n"); + + return -EOPNOTSUPP; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(qede_pm_ops, qede_suspend, NULL); + static const struct pci_error_handlers qede_err_handler = { .error_detected = qede_io_error_detected, }; @@ -190,6 +199,7 @@ static struct pci_driver qede_pci_driver = { .sriov_configure = qede_sriov_configure, #endif .err_handler = &qede_err_handler, + .driver.pm = &qede_pm_ops, };
static struct qed_eth_cb_ops qede_ll_ops = { diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index 7adde9639c8a..35d8e9811998 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -1194,7 +1194,7 @@ int ef100_probe_netdev_pf(struct efx_nic *efx) net_dev->features |= NETIF_F_HW_TC; efx->fixed_features |= NETIF_F_HW_TC; } - return rc; + return 0; }
int ef100_probe_vf(struct efx_nic *efx) diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c index d7827ab3761f..6c8dfe0a6482 100644 --- a/drivers/net/ethernet/sfc/tc.c +++ b/drivers/net/ethernet/sfc/tc.c @@ -1310,6 +1310,58 @@ void efx_tc_deconfigure_default_rule(struct efx_nic *efx, rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL; }
+static int efx_tc_configure_fallback_acts(struct efx_nic *efx, u32 eg_port, + struct efx_tc_action_set_list *acts) +{ + struct efx_tc_action_set *act; + int rc; + + act = kzalloc(sizeof(*act), GFP_KERNEL); + if (!act) + return -ENOMEM; + act->deliver = 1; + act->dest_mport = eg_port; + rc = efx_mae_alloc_action_set(efx, act); + if (rc) + goto fail1; + EFX_WARN_ON_PARANOID(!list_empty(&acts->list)); + list_add_tail(&act->list, &acts->list); + rc = efx_mae_alloc_action_set_list(efx, acts); + if (rc) + goto fail2; + return 0; +fail2: + list_del(&act->list); + efx_mae_free_action_set(efx, act->fw_id); +fail1: + kfree(act); + return rc; +} + +static int efx_tc_configure_fallback_acts_pf(struct efx_nic *efx) +{ + struct efx_tc_action_set_list *acts = &efx->tc->facts.pf; + u32 eg_port; + + efx_mae_mport_uplink(efx, &eg_port); + return efx_tc_configure_fallback_acts(efx, eg_port, acts); +} + +static int efx_tc_configure_fallback_acts_reps(struct efx_nic *efx) +{ + struct efx_tc_action_set_list *acts = &efx->tc->facts.reps; + u32 eg_port; + + efx_mae_mport_mport(efx, efx->tc->reps_mport_id, &eg_port); + return efx_tc_configure_fallback_acts(efx, eg_port, acts); +} + +static void efx_tc_deconfigure_fallback_acts(struct efx_nic *efx, + struct efx_tc_action_set_list *acts) +{ + efx_tc_free_action_set_list(efx, acts, true); +} + static int efx_tc_configure_rep_mport(struct efx_nic *efx) { u32 rep_mport_label; @@ -1402,10 +1454,16 @@ int efx_init_tc(struct efx_nic *efx) rc = efx_tc_configure_rep_mport(efx); if (rc) return rc; - efx->tc->up = true; + rc = efx_tc_configure_fallback_acts_pf(efx); + if (rc) + return rc; + rc = efx_tc_configure_fallback_acts_reps(efx); + if (rc) + return rc; rc = flow_indr_dev_register(efx_tc_indr_setup_cb, efx); if (rc) return rc; + efx->tc->up = true; return 0; }
@@ -1419,6 +1477,8 @@ void efx_fini_tc(struct efx_nic *efx) efx_tc_deconfigure_rep_mport(efx); efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.pf); efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.wire); + efx_tc_deconfigure_fallback_acts(efx, &efx->tc->facts.pf); + efx_tc_deconfigure_fallback_acts(efx, &efx->tc->facts.reps); efx->tc->up = false; }
@@ -1483,6 +1543,10 @@ int efx_init_struct_tc(struct efx_nic *efx) efx->tc->dflt.pf.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL; INIT_LIST_HEAD(&efx->tc->dflt.wire.acts.list); efx->tc->dflt.wire.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL; + INIT_LIST_HEAD(&efx->tc->facts.pf.list); + efx->tc->facts.pf.fw_id = MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL; + INIT_LIST_HEAD(&efx->tc->facts.reps.list); + efx->tc->facts.reps.fw_id = MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL; efx->extra_channel_type[EFX_EXTRA_CHANNEL_TC] = &efx_tc_channel_type; return 0; fail_match_action_ht: @@ -1508,6 +1572,10 @@ void efx_fini_struct_tc(struct efx_nic *efx) MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL); EFX_WARN_ON_PARANOID(efx->tc->dflt.wire.fw_id != MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL); + EFX_WARN_ON_PARANOID(efx->tc->facts.pf.fw_id != + MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL); + EFX_WARN_ON_PARANOID(efx->tc->facts.reps.fw_id != + MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL); rhashtable_free_and_destroy(&efx->tc->match_action_ht, efx_tc_flow_free, efx); rhashtable_free_and_destroy(&efx->tc->encap_match_ht, diff --git a/drivers/net/ethernet/sfc/tc.h b/drivers/net/ethernet/sfc/tc.h index 04cced6a2d39..2b6782e9c722 100644 --- a/drivers/net/ethernet/sfc/tc.h +++ b/drivers/net/ethernet/sfc/tc.h @@ -133,6 +133,11 @@ enum efx_tc_rule_prios { * %EFX_TC_PRIO_DFLT. Named by *ingress* port * @dflt.pf: rule for traffic ingressing from PF (egresses to wire) * @dflt.wire: rule for traffic ingressing from wire (egresses to PF) + * @facts: Fallback action-set-lists for unready rules. Named by *egress* port + * @facts.pf: action-set-list for unready rules on PF netdev, hence applying to + * traffic from wire, and egressing to PF + * @facts.reps: action-set-list for unready rules on representors, hence + * applying to traffic from representees, and egressing to the reps mport * @up: have TC datastructures been set up? */ struct efx_tc_state { @@ -153,6 +158,10 @@ struct efx_tc_state { struct efx_tc_flow_rule pf; struct efx_tc_flow_rule wire; } dflt; + struct { + struct efx_tc_action_set_list pf; + struct efx_tc_action_set_list reps; + } facts; bool up; };
diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c index 323bec5e57f8..356099169003 100644 --- a/drivers/net/pcs/pcs-rzn1-miic.c +++ b/drivers/net/pcs/pcs-rzn1-miic.c @@ -313,15 +313,21 @@ struct phylink_pcs *miic_create(struct device *dev, struct device_node *np)
pdev = of_find_device_by_node(pcs_np); of_node_put(pcs_np); - if (!pdev || !platform_get_drvdata(pdev)) + if (!pdev || !platform_get_drvdata(pdev)) { + if (pdev) + put_device(&pdev->dev); return ERR_PTR(-EPROBE_DEFER); + }
miic_port = kzalloc(sizeof(*miic_port), GFP_KERNEL); - if (!miic_port) + if (!miic_port) { + put_device(&pdev->dev); return ERR_PTR(-ENOMEM); + }
miic = platform_get_drvdata(pdev); device_link_add(dev, miic->dev, DL_FLAG_AUTOREMOVE_CONSUMER); + put_device(&pdev->dev);
miic_port->miic = miic; miic_port->port = port - 1; diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index ef6dc008e4c5..8a77ec33b417 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -304,7 +304,6 @@ struct at803x_priv { bool is_1000basex; struct regulator_dev *vddio_rdev; struct regulator_dev *vddh_rdev; - struct regulator *vddio; u64 stats[ARRAY_SIZE(at803x_hw_stats)]; };
@@ -460,21 +459,27 @@ static int at803x_set_wol(struct phy_device *phydev, phy_write_mmd(phydev, MDIO_MMD_PCS, offsets[i], mac[(i * 2) + 1] | (mac[(i * 2)] << 8));
- /* Enable WOL function */ - ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_PHY_MMD3_WOL_CTRL, - 0, AT803X_WOL_EN); - if (ret) - return ret; + /* Enable WOL function for 1588 */ + if (phydev->drv->phy_id == ATH8031_PHY_ID) { + ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, + AT803X_PHY_MMD3_WOL_CTRL, + 0, AT803X_WOL_EN); + if (ret) + return ret; + } /* Enable WOL interrupt */ ret = phy_modify(phydev, AT803X_INTR_ENABLE, 0, AT803X_INTR_ENABLE_WOL); if (ret) return ret; } else { - /* Disable WoL function */ - ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_PHY_MMD3_WOL_CTRL, - AT803X_WOL_EN, 0); - if (ret) - return ret; + /* Disable WoL function for 1588 */ + if (phydev->drv->phy_id == ATH8031_PHY_ID) { + ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, + AT803X_PHY_MMD3_WOL_CTRL, + AT803X_WOL_EN, 0); + if (ret) + return ret; + } /* Disable WOL interrupt */ ret = phy_modify(phydev, AT803X_INTR_ENABLE, AT803X_INTR_ENABLE_WOL, 0); if (ret) @@ -509,11 +514,11 @@ static void at803x_get_wol(struct phy_device *phydev, wol->supported = WAKE_MAGIC; wol->wolopts = 0;
- value = phy_read_mmd(phydev, MDIO_MMD_PCS, AT803X_PHY_MMD3_WOL_CTRL); + value = phy_read(phydev, AT803X_INTR_ENABLE); if (value < 0) return;
- if (value & AT803X_WOL_EN) + if (value & AT803X_INTR_ENABLE_WOL) wol->wolopts |= WAKE_MAGIC; }
@@ -824,11 +829,11 @@ static int at803x_parse_dt(struct phy_device *phydev) if (ret < 0) return ret;
- priv->vddio = devm_regulator_get_optional(&phydev->mdio.dev, - "vddio"); - if (IS_ERR(priv->vddio)) { + ret = devm_regulator_get_enable_optional(&phydev->mdio.dev, + "vddio"); + if (ret) { phydev_err(phydev, "failed to get VDDIO regulator\n"); - return PTR_ERR(priv->vddio); + return ret; }
/* Only AR8031/8033 support 1000Base-X for SFP modules */ @@ -856,23 +861,12 @@ static int at803x_probe(struct phy_device *phydev) if (ret) return ret;
- if (priv->vddio) { - ret = regulator_enable(priv->vddio); - if (ret < 0) - return ret; - } - if (phydev->drv->phy_id == ATH8031_PHY_ID) { int ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG); int mode_cfg; - struct ethtool_wolinfo wol = { - .wolopts = 0, - };
- if (ccr < 0) { - ret = ccr; - goto err; - } + if (ccr < 0) + return ccr; mode_cfg = ccr & AT803X_MODE_CFG_MASK;
switch (mode_cfg) { @@ -886,29 +880,17 @@ static int at803x_probe(struct phy_device *phydev) break; }
- /* Disable WOL by default */ - ret = at803x_set_wol(phydev, &wol); - if (ret < 0) { - phydev_err(phydev, "failed to disable WOL on probe: %d\n", ret); - goto err; - } + /* Disable WoL in 1588 register which is enabled + * by default + */ + ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, + AT803X_PHY_MMD3_WOL_CTRL, + AT803X_WOL_EN, 0); + if (ret) + return ret; }
return 0; - -err: - if (priv->vddio) - regulator_disable(priv->vddio); - - return ret; -} - -static void at803x_remove(struct phy_device *phydev) -{ - struct at803x_priv *priv = phydev->priv; - - if (priv->vddio) - regulator_disable(priv->vddio); }
static int at803x_get_features(struct phy_device *phydev) @@ -2021,7 +2003,6 @@ static struct phy_driver at803x_driver[] = { .name = "Qualcomm Atheros AR8035", .flags = PHY_POLL_CABLE_TEST, .probe = at803x_probe, - .remove = at803x_remove, .config_aneg = at803x_config_aneg, .config_init = at803x_config_init, .soft_reset = genphy_soft_reset, @@ -2043,7 +2024,6 @@ static struct phy_driver at803x_driver[] = { .name = "Qualcomm Atheros AR8030", .phy_id_mask = AT8030_PHY_ID_MASK, .probe = at803x_probe, - .remove = at803x_remove, .config_init = at803x_config_init, .link_change_notify = at803x_link_change_notify, .set_wol = at803x_set_wol, @@ -2059,7 +2039,6 @@ static struct phy_driver at803x_driver[] = { .name = "Qualcomm Atheros AR8031/AR8033", .flags = PHY_POLL_CABLE_TEST, .probe = at803x_probe, - .remove = at803x_remove, .config_init = at803x_config_init, .config_aneg = at803x_config_aneg, .soft_reset = genphy_soft_reset, @@ -2082,7 +2061,6 @@ static struct phy_driver at803x_driver[] = { PHY_ID_MATCH_EXACT(ATH8032_PHY_ID), .name = "Qualcomm Atheros AR8032", .probe = at803x_probe, - .remove = at803x_remove, .flags = PHY_POLL_CABLE_TEST, .config_init = at803x_config_init, .link_change_notify = at803x_link_change_notify, @@ -2098,7 +2076,6 @@ static struct phy_driver at803x_driver[] = { PHY_ID_MATCH_EXACT(ATH9331_PHY_ID), .name = "Qualcomm Atheros AR9331 built-in PHY", .probe = at803x_probe, - .remove = at803x_remove, .suspend = at803x_suspend, .resume = at803x_resume, .flags = PHY_POLL_CABLE_TEST, @@ -2115,7 +2092,6 @@ static struct phy_driver at803x_driver[] = { PHY_ID_MATCH_EXACT(QCA9561_PHY_ID), .name = "Qualcomm Atheros QCA9561 built-in PHY", .probe = at803x_probe, - .remove = at803x_remove, .suspend = at803x_suspend, .resume = at803x_resume, .flags = PHY_POLL_CABLE_TEST, @@ -2181,7 +2157,6 @@ static struct phy_driver at803x_driver[] = { .name = "Qualcomm QCA8081", .flags = PHY_POLL_CABLE_TEST, .probe = at803x_probe, - .remove = at803x_remove, .config_intr = at803x_config_intr, .handle_interrupt = at803x_handle_interrupt, .get_tunable = at803x_get_tunable, diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index ad71c88c87e7..f9ad8902100f 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -486,6 +486,17 @@ static int bcm54xx_resume(struct phy_device *phydev) return bcm54xx_config_init(phydev); }
+static int bcm54810_read_mmd(struct phy_device *phydev, int devnum, u16 regnum) +{ + return -EOPNOTSUPP; +} + +static int bcm54810_write_mmd(struct phy_device *phydev, int devnum, u16 regnum, + u16 val) +{ + return -EOPNOTSUPP; +} + static int bcm54811_config_init(struct phy_device *phydev) { int err, reg; @@ -981,6 +992,8 @@ static struct phy_driver broadcom_drivers[] = { .get_strings = bcm_phy_get_strings, .get_stats = bcm54xx_get_stats, .probe = bcm54xx_phy_probe, + .read_mmd = bcm54810_read_mmd, + .write_mmd = bcm54810_write_mmd, .config_init = bcm54xx_config_init, .config_aneg = bcm5481_config_aneg, .config_intr = bcm_phy_config_intr, diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 2c4e6de8f4d9..7958ea0e8714 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -3217,6 +3217,8 @@ static int phy_probe(struct device *dev) goto out; }
+ phy_disable_interrupts(phydev); + /* Start out supporting everything. Eventually, * a controller will attach, and may modify one * or both of these values @@ -3334,16 +3336,6 @@ static int phy_remove(struct device *dev) return 0; }
-static void phy_shutdown(struct device *dev) -{ - struct phy_device *phydev = to_phy_device(dev); - - if (phydev->state == PHY_READY || !phydev->attached_dev) - return; - - phy_disable_interrupts(phydev); -} - /** * phy_driver_register - register a phy_driver with the PHY layer * @new_driver: new phy_driver to register @@ -3377,7 +3369,6 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner) new_driver->mdiodrv.driver.bus = &mdio_bus_type; new_driver->mdiodrv.driver.probe = phy_probe; new_driver->mdiodrv.driver.remove = phy_remove; - new_driver->mdiodrv.driver.shutdown = phy_shutdown; new_driver->mdiodrv.driver.owner = owner; new_driver->mdiodrv.driver.probe_type = PROBE_FORCE_SYNCHRONOUS;
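With the shutdown hook removed, the PHY's interrupt source is quiesced once, early in phy_probe(), before any handler can rely on it. A minimal sketch of the same "mask first, then register" ordering for a hypothetical memory-mapped device (names and register offset are illustrative only, not from the PHY core):

#include <linux/interrupt.h>
#include <linux/io.h>

#define MYDEV_IRQ_ENABLE        0x10    /* hypothetical register offset */

static int mydev_quiesce_and_request_irq(void __iomem *regs, int irq,
                                         irq_handler_t handler, void *data)
{
        /* Mask all interrupt sources left enabled by firmware or a
         * previous binding before the handler is installed. */
        writel(0, regs + MYDEV_IRQ_ENABLE);

        return request_irq(irq, handler, 0, "mydev", data);
}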
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index d3dc22509ea5..382756c3fb83 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2200,7 +2200,9 @@ static void team_setup(struct net_device *dev)
dev->hw_features = TEAM_VLAN_FEATURES | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER; + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_FILTER;
dev->hw_features |= NETIF_F_GSO_ENCAP_ALL; dev->features |= dev->hw_features; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index dce9f9d63e04..76019949e3fe 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -1071,8 +1071,9 @@ static int __veth_napi_enable_range(struct net_device *dev, int start, int end) err_xdp_ring: for (i--; i >= start; i--) ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free); + i = end; err_page_pool: - for (i = start; i < end; i++) { + for (i--; i >= start; i--) { page_pool_destroy(priv->rq[i].page_pool); priv->rq[i].page_pool = NULL; } diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 2336a0e4befa..9b310795617c 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -2652,7 +2652,7 @@ static void virtnet_init_default_rss(struct virtnet_info *vi) vi->ctrl->rss.indirection_table[i] = indir_val; }
- vi->ctrl->rss.max_tx_vq = vi->curr_queue_pairs; + vi->ctrl->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0; vi->ctrl->rss.hash_key_length = vi->rss_key_size;
netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size); @@ -4110,8 +4110,6 @@ static int virtnet_probe(struct virtio_device *vdev) if (vi->has_rss || vi->has_rss_hash_report) virtnet_init_default_rss(vi);
- _virtnet_set_queues(vi, vi->curr_queue_pairs); - /* serialize netdev register + virtio_device_ready() with ndo_open() */ rtnl_lock();
@@ -4124,6 +4122,8 @@ static int virtnet_probe(struct virtio_device *vdev)
virtio_device_ready(vdev);
+ _virtnet_set_queues(vi, vi->curr_queue_pairs); + /* a random MAC address has been assigned, notify the device. * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is not there * because many devices work fine without getting MAC explicitly diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index 09825b4a075e..e6eec85480ca 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -223,6 +223,7 @@ #define EP_STATE_ENABLED 1
static const unsigned int pcie_gen_freq[] = { + GEN1_CORE_CLK_FREQ, /* PCI_EXP_LNKSTA_CLS == 0; undefined */ GEN1_CORE_CLK_FREQ, GEN2_CORE_CLK_FREQ, GEN3_CORE_CLK_FREQ, @@ -459,7 +460,11 @@ static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) & PCI_EXP_LNKSTA_CLS; - clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]); + + if (speed >= ARRAY_SIZE(pcie_gen_freq)) + speed = 0; + + clk_set_rate(pcie->core_clk, pcie_gen_freq[speed]);
if (pcie->of_data->has_ltr_req_fix) return IRQ_HANDLED; @@ -1020,7 +1025,11 @@ static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) & PCI_EXP_LNKSTA_CLS; - clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]); + + if (speed >= ARRAY_SIZE(pcie_gen_freq)) + speed = 0; + + clk_set_rate(pcie->core_clk, pcie_gen_freq[speed]);
tegra_pcie_enable_interrupts(pp);
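Both call sites above now clamp the link-speed field before using it as a table index, and the table gained an entry for the "undefined" encoding 0, so a spurious LNKSTA value can no longer read past the end of pcie_gen_freq[]. A minimal sketch of the same clamp, with a hypothetical table (not from the driver):

#include <linux/kernel.h>

static const unsigned int freq_tbl[] = {
        250000000,      /* index 0: undefined encoding, safe default */
        250000000,      /* Gen1 */
        500000000,      /* Gen2 */
};

static unsigned int lookup_core_clk(unsigned int speed)
{
        if (speed >= ARRAY_SIZE(freq_tbl))
                speed = 0;      /* out of range: fall back to the default */
        return freq_tbl[speed];
}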
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c index 471e0c5815f3..bf9d070a4496 100644 --- a/drivers/pcmcia/rsrc_nonstatic.c +++ b/drivers/pcmcia/rsrc_nonstatic.c @@ -1053,6 +1053,8 @@ static void nonstatic_release_resource_db(struct pcmcia_socket *s) q = p->next; kfree(p); } + + kfree(data); }
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index c5f52d4f7781..1fb0a24356bf 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -1039,6 +1039,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct msm_pinctrl *pctrl = gpiochip_get_data(gc); const struct msm_pingroup *g; + u32 intr_target_mask = GENMASK(2, 0); unsigned long flags; bool was_enabled; u32 val; @@ -1075,13 +1076,15 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) * With intr_target_use_scm interrupts are routed to * application cpu using scm calls. */ + if (g->intr_target_width) + intr_target_mask = GENMASK(g->intr_target_width - 1, 0); + if (pctrl->intr_target_use_scm) { u32 addr = pctrl->phys_base[0] + g->intr_target_reg; int ret;
qcom_scm_io_readl(addr, &val); - - val &= ~(7 << g->intr_target_bit); + val &= ~(intr_target_mask << g->intr_target_bit); val |= g->intr_target_kpss_val << g->intr_target_bit;
ret = qcom_scm_io_writel(addr, val); @@ -1091,7 +1094,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) d->hwirq); } else { val = msm_readl_intr_target(pctrl, g); - val &= ~(7 << g->intr_target_bit); + val &= ~(intr_target_mask << g->intr_target_bit); val |= g->intr_target_kpss_val << g->intr_target_bit; msm_writel_intr_target(val, pctrl, g); } diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h index 985eceda2517..7f30416be127 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.h +++ b/drivers/pinctrl/qcom/pinctrl-msm.h @@ -51,6 +51,7 @@ struct msm_function { * @intr_status_bit: Offset in @intr_status_reg for reading and acking the interrupt * status. * @intr_target_bit: Offset in @intr_target_reg for configuring the interrupt routing. + * @intr_target_width: Number of bits used for specifying interrupt routing target. * @intr_target_kpss_val: Value in @intr_target_bit for specifying that the interrupt from * this gpio should get routed to the KPSS processor. * @intr_raw_status_bit: Offset in @intr_cfg_reg for the raw status bit. @@ -94,6 +95,7 @@ struct msm_pingroup { unsigned intr_ack_high:1;
unsigned intr_target_bit:5; + unsigned intr_target_width:5; unsigned intr_target_kpss_val:5; unsigned intr_raw_status_bit:5; unsigned intr_polarity_bit:5; diff --git a/drivers/pinctrl/qcom/pinctrl-sa8775p.c b/drivers/pinctrl/qcom/pinctrl-sa8775p.c index 2ae7cdca65d3..62f7a36d290c 100644 --- a/drivers/pinctrl/qcom/pinctrl-sa8775p.c +++ b/drivers/pinctrl/qcom/pinctrl-sa8775p.c @@ -54,6 +54,7 @@ .intr_enable_bit = 0, \ .intr_status_bit = 0, \ .intr_target_bit = 5, \ + .intr_target_width = 4, \ .intr_target_kpss_val = 3, \ .intr_raw_status_bit = 4, \ .intr_polarity_bit = 1, \ diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c index dfd5ec9f75c9..a0621665a6d2 100644 --- a/drivers/regulator/da9063-regulator.c +++ b/drivers/regulator/da9063-regulator.c @@ -778,9 +778,6 @@ static int da9063_check_xvp_constraints(struct regulator_config *config) const struct notification_limit *uv_l = &constr->under_voltage_limits; const struct notification_limit *ov_l = &constr->over_voltage_limits;
- if (!config->init_data) /* No config in DT, pointers will be invalid */ - return 0; - /* make sure that only one severity is used to clarify if unchanged, enabled or disabled */ if ((!!uv_l->prot + !!uv_l->err + !!uv_l->warn) > 1) { dev_err(config->dev, "%s: at most one voltage monitoring severity allowed!\n", @@ -1031,9 +1028,12 @@ static int da9063_regulator_probe(struct platform_device *pdev) config.of_node = da9063_reg_matches[id].of_node; config.regmap = da9063->regmap;
- ret = da9063_check_xvp_constraints(&config); - if (ret) - return ret; + /* Checking constraints requires init_data from DT. */ + if (config.init_data) { + ret = da9063_check_xvp_constraints(&config); + if (ret) + return ret; + }
regl->rdev = devm_regulator_register(&pdev->dev, ®l->desc, &config); diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c index f3b280af0773..cd077b7c4aff 100644 --- a/drivers/regulator/qcom-rpmh-regulator.c +++ b/drivers/regulator/qcom-rpmh-regulator.c @@ -1068,7 +1068,7 @@ static const struct rpmh_vreg_init_data pm8550_vreg_data[] = { RPMH_VREG("ldo9", "ldo%s9", &pmic5_pldo, "vdd-l8-l9"), RPMH_VREG("ldo10", "ldo%s10", &pmic5_nldo515, "vdd-l1-l4-l10"), RPMH_VREG("ldo11", "ldo%s11", &pmic5_nldo515, "vdd-l11"), - RPMH_VREG("ldo12", "ldo%s12", &pmic5_pldo, "vdd-l12"), + RPMH_VREG("ldo12", "ldo%s12", &pmic5_nldo515, "vdd-l12"), RPMH_VREG("ldo13", "ldo%s13", &pmic5_pldo, "vdd-l2-l13-l14"), RPMH_VREG("ldo14", "ldo%s14", &pmic5_pldo, "vdd-l2-l13-l14"), RPMH_VREG("ldo15", "ldo%s15", &pmic5_nldo515, "vdd-l15"), diff --git a/drivers/soc/aspeed/aspeed-socinfo.c b/drivers/soc/aspeed/aspeed-socinfo.c index 1ca140356a08..3f759121dc00 100644 --- a/drivers/soc/aspeed/aspeed-socinfo.c +++ b/drivers/soc/aspeed/aspeed-socinfo.c @@ -137,6 +137,7 @@ static int __init aspeed_socinfo_init(void)
soc_dev = soc_device_register(attrs); if (IS_ERR(soc_dev)) { + kfree(attrs->machine); kfree(attrs->soc_id); kfree(attrs->serial_number); kfree(attrs); diff --git a/drivers/soc/aspeed/aspeed-uart-routing.c b/drivers/soc/aspeed/aspeed-uart-routing.c index ef8b24fd1851..59123e1f27ac 100644 --- a/drivers/soc/aspeed/aspeed-uart-routing.c +++ b/drivers/soc/aspeed/aspeed-uart-routing.c @@ -524,7 +524,7 @@ static ssize_t aspeed_uart_routing_store(struct device *dev, struct aspeed_uart_routing_selector *sel = to_routing_selector(attr); int val;
- val = match_string(sel->options, -1, buf); + val = __sysfs_match_string(sel->options, -1, buf); if (val < 0) { dev_err(dev, "invalid value \"%s\"\n", buf); return -EINVAL; } diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c index e58beac44295..1257d1c41f8e 100644 --- a/drivers/thunderbolt/nhi.c +++ b/drivers/thunderbolt/nhi.c @@ -1480,6 +1480,8 @@ static struct pci_device_id nhi_ids[] = { .driver_data = (kernel_ulong_t)&icl_nhi_ops }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI1), .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI) },
/* Any USB4 compliant host */ { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) }, diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h index b0718020c6f5..0f029ce75882 100644 --- a/drivers/thunderbolt/nhi.h +++ b/drivers/thunderbolt/nhi.h @@ -75,6 +75,10 @@ extern const struct tb_nhi_ops icl_nhi_ops; #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE 0x15ef #define PCI_DEVICE_ID_INTEL_ADL_NHI0 0x463e #define PCI_DEVICE_ID_INTEL_ADL_NHI1 0x466d +#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI 0x5781 +#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI 0x5784 +#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_80G_BRIDGE 0x5786 +#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_40G_BRIDGE 0x57a4 #define PCI_DEVICE_ID_INTEL_MTL_M_NHI0 0x7eb2 #define PCI_DEVICE_ID_INTEL_MTL_P_NHI0 0x7ec2 #define PCI_DEVICE_ID_INTEL_MTL_P_NHI1 0x7ec3 diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c index 1157b8869bcc..8c2ee431fcde 100644 --- a/drivers/thunderbolt/quirks.c +++ b/drivers/thunderbolt/quirks.c @@ -74,6 +74,14 @@ static const struct tb_quirk tb_quirks[] = { quirk_usb3_maximum_bandwidth }, { 0x8087, PCI_DEVICE_ID_INTEL_MTL_P_NHI1, 0x0000, 0x0000, quirk_usb3_maximum_bandwidth }, + { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI, 0x0000, 0x0000, + quirk_usb3_maximum_bandwidth }, + { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI, 0x0000, 0x0000, + quirk_usb3_maximum_bandwidth }, + { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_80G_BRIDGE, 0x0000, 0x0000, + quirk_usb3_maximum_bandwidth }, + { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_40G_BRIDGE, 0x0000, 0x0000, + quirk_usb3_maximum_bandwidth }, /* * CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms. */ diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c index 9cc28197dbc4..edbd92435b41 100644 --- a/drivers/thunderbolt/retimer.c +++ b/drivers/thunderbolt/retimer.c @@ -187,6 +187,21 @@ static ssize_t nvm_authenticate_show(struct device *dev, return ret; }
+static void tb_retimer_nvm_authenticate_status(struct tb_port *port, u32 *status) +{ + int i; + + tb_port_dbg(port, "reading NVM authentication status of retimers\n"); + + /* + * Before doing anything else, read the authentication status. + * If the retimer has it set, store it for the new retimer + * device instance. + */ + for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) + usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]); +} + static void tb_retimer_set_inbound_sbtx(struct tb_port *port) { int i; @@ -455,18 +470,16 @@ int tb_retimer_scan(struct tb_port *port, bool add) return ret;
/* - * Enable sideband channel for each retimer. We can do this - * regardless whether there is device connected or not. + * Immediately after sending enumerate retimers read the + * authentication status of each retimer. */ - tb_retimer_set_inbound_sbtx(port); + tb_retimer_nvm_authenticate_status(port, status);
/* - * Before doing anything else, read the authentication status. - * If the retimer has it set, store it for the new retimer - * device instance. + * Enable sideband channel for each retimer. We can do this + * regardless whether there is device connected or not. */ - for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) - usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]); + tb_retimer_set_inbound_sbtx(port);
for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) { /* diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 1cdefac4dd1b..739f522cb893 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -3042,12 +3042,13 @@ static void gsm_error(struct gsm_mux *gsm) static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc) { int i; - struct gsm_dlci *dlci = gsm->dlci[0]; + struct gsm_dlci *dlci; struct gsm_msg *txq, *ntxq;
gsm->dead = true; mutex_lock(&gsm->mutex);
+ dlci = gsm->dlci[0]; if (dlci) { if (disc && dlci->state != DLCI_CLOSED) { gsm_dlci_begin_close(dlci); diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 053d44412e42..0a67dff575f7 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -3288,6 +3288,7 @@ void serial8250_init_port(struct uart_8250_port *up) struct uart_port *port = &up->port;
spin_lock_init(&port->lock); + port->pm = NULL; port->ops = &serial8250_pops; port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index f38606b75096..3e4992b28113 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -1137,8 +1137,8 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport) unsigned long sr = lpuart32_read(&sport->port, UARTSTAT);
if (sr & (UARTSTAT_PE | UARTSTAT_FE)) { - /* Read DR to clear the error flags */ - lpuart32_read(&sport->port, UARTDATA); + /* Clear the error flags */ + lpuart32_write(&sport->port, sr, UARTSTAT);
if (sr & UARTSTAT_PE) sport->port.icount.parity++; diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c index 1e38fc9b10c1..e9e11a259621 100644 --- a/drivers/tty/serial/stm32-usart.c +++ b/drivers/tty/serial/stm32-usart.c @@ -1755,13 +1755,10 @@ static int stm32_usart_serial_remove(struct platform_device *pdev) struct uart_port *port = platform_get_drvdata(pdev); struct stm32_port *stm32_port = to_stm32_port(port); const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; - int err; u32 cr3;
pm_runtime_get_sync(&pdev->dev); - err = uart_remove_one_port(&stm32_usart_driver, port); - if (err) - return(err); + uart_remove_one_port(&stm32_usart_driver, port);
pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index 2855ac303001..f7577f2bd2c5 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c @@ -70,6 +70,10 @@ static const struct ci_hdrc_imx_platform_flag imx7ulp_usb_data = { CI_HDRC_PMQOS, };
+static const struct ci_hdrc_imx_platform_flag imx8ulp_usb_data = { + .flags = CI_HDRC_SUPPORTS_RUNTIME_PM, +}; + static const struct of_device_id ci_hdrc_imx_dt_ids[] = { { .compatible = "fsl,imx23-usb", .data = &imx23_usb_data}, { .compatible = "fsl,imx28-usb", .data = &imx28_usb_data}, @@ -80,6 +84,7 @@ static const struct of_device_id ci_hdrc_imx_dt_ids[] = { { .compatible = "fsl,imx6ul-usb", .data = &imx6ul_usb_data}, { .compatible = "fsl,imx7d-usb", .data = &imx7d_usb_data}, { .compatible = "fsl,imx7ulp-usb", .data = &imx7ulp_usb_data}, + { .compatible = "fsl,imx8ulp-usb", .data = &imx8ulp_usb_data}, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids); diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c index c57c1a71a513..681c2ddc83fa 100644 --- a/drivers/usb/chipidea/usbmisc_imx.c +++ b/drivers/usb/chipidea/usbmisc_imx.c @@ -135,7 +135,7 @@ #define TXVREFTUNE0_MASK (0xf << 20)
#define MX6_USB_OTG_WAKEUP_BITS (MX6_BM_WAKEUP_ENABLE | MX6_BM_VBUS_WAKEUP | \ - MX6_BM_ID_WAKEUP) + MX6_BM_ID_WAKEUP | MX6SX_BM_DPDM_WAKEUP_EN)
struct usbmisc_ops { /* It's called once when probe a usb device */ @@ -152,6 +152,7 @@ struct usbmisc_ops { int (*charger_detection)(struct imx_usbmisc_data *data); /* It's called when system resume from usb power lost */ int (*power_lost_check)(struct imx_usbmisc_data *data); + void (*vbus_comparator_on)(struct imx_usbmisc_data *data, bool on); };
struct imx_usbmisc { @@ -875,6 +876,33 @@ static int imx7d_charger_detection(struct imx_usbmisc_data *data) return ret; }
+static void usbmisc_imx7d_vbus_comparator_on(struct imx_usbmisc_data *data, + bool on) +{ + unsigned long flags; + struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev); + u32 val; + + if (data->hsic) + return; + + spin_lock_irqsave(&usbmisc->lock, flags); + /* + * Disable VBUS valid comparator when in suspend mode, + * when OTG is disabled and DRVVBUS0 is asserted case + * the Bandgap circuitry and VBUS Valid comparator are + * still powered, even in Suspend or Sleep mode. + */ + val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2); + if (on) + val |= MX7D_USB_OTG_PHY_CFG2_DRVVBUS0; + else + val &= ~MX7D_USB_OTG_PHY_CFG2_DRVVBUS0; + + writel(val, usbmisc->base + MX7D_USB_OTG_PHY_CFG2); + spin_unlock_irqrestore(&usbmisc->lock, flags); +} + static int usbmisc_imx7ulp_init(struct imx_usbmisc_data *data) { struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev); @@ -1018,6 +1046,7 @@ static const struct usbmisc_ops imx7d_usbmisc_ops = { .set_wakeup = usbmisc_imx7d_set_wakeup, .charger_detection = imx7d_charger_detection, .power_lost_check = usbmisc_imx7d_power_lost_check, + .vbus_comparator_on = usbmisc_imx7d_vbus_comparator_on, };
static const struct usbmisc_ops imx7ulp_usbmisc_ops = { @@ -1132,6 +1161,9 @@ int imx_usbmisc_suspend(struct imx_usbmisc_data *data, bool wakeup)
usbmisc = dev_get_drvdata(data->dev);
+ if (usbmisc->ops->vbus_comparator_on) + usbmisc->ops->vbus_comparator_on(data, false); + if (wakeup && usbmisc->ops->set_wakeup) ret = usbmisc->ops->set_wakeup(data, true); if (ret) { @@ -1185,6 +1217,9 @@ int imx_usbmisc_resume(struct imx_usbmisc_data *data, bool wakeup) goto hsic_set_clk_fail; }
+ if (usbmisc->ops->vbus_comparator_on) + usbmisc->ops->vbus_comparator_on(data, true); + return 0;
hsic_set_clk_fail: diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c index e5d522d54f6a..97f07757d19e 100644 --- a/drivers/usb/gadget/function/u_serial.c +++ b/drivers/usb/gadget/function/u_serial.c @@ -916,8 +916,11 @@ static void __gs_console_push(struct gs_console *cons) }
req->length = size; + + spin_unlock_irq(&cons->lock); if (usb_ep_queue(ep, req, GFP_ATOMIC)) req->length = 0; + spin_lock_irq(&cons->lock); }
static void gs_console_work(struct work_struct *work) diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c index dd1c6b2ca7c6..e81865978299 100644 --- a/drivers/usb/gadget/function/uvc_video.c +++ b/drivers/usb/gadget/function/uvc_video.c @@ -386,6 +386,9 @@ static void uvcg_video_pump(struct work_struct *work) struct uvc_buffer *buf; unsigned long flags; int ret; + bool buf_int; + /* video->max_payload_size is only set when using bulk transfer */ + bool is_bulk = video->max_payload_size;
while (video->ep->enabled) { /* @@ -408,20 +411,35 @@ static void uvcg_video_pump(struct work_struct *work) */ spin_lock_irqsave(&queue->irqlock, flags); buf = uvcg_queue_head(queue); - if (buf == NULL) { + + if (buf != NULL) { + video->encode(req, video, buf); + /* Always interrupt for the last request of a video buffer */ + buf_int = buf->state == UVC_BUF_STATE_DONE; + } else if (!(queue->flags & UVC_QUEUE_DISCONNECTED) && !is_bulk) { + /* + * No video buffer available; the queue is still connected and + * we're transferring over ISOC. Queue a 0 length request to + * prevent missed ISOC transfers. + */ + req->length = 0; + buf_int = false; + } else { + /* + * Either queue has been disconnected or no video buffer + * available to bulk transfer. Either way, stop processing + * further. + */ spin_unlock_irqrestore(&queue->irqlock, flags); break; }
- video->encode(req, video, buf); - /* * With usb3 we have more requests. This will decrease the * interrupt load to a quarter but also catches the corner * cases, which needs to be handled. */ - if (list_empty(&video->req_free) || - buf->state == UVC_BUF_STATE_DONE || + if (list_empty(&video->req_free) || buf_int || !(video->req_int_count % DIV_ROUND_UP(video->uvc_num_requests, 4))) { video->req_int_count = 0; @@ -441,8 +459,7 @@ static void uvcg_video_pump(struct work_struct *work)
/* Endpoint now owns the request */ req = NULL; - if (buf->state != UVC_BUF_STATE_DONE) - video->req_int_count++; + video->req_int_count++; }
if (!req) @@ -527,4 +544,3 @@ int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc) V4L2_BUF_TYPE_VIDEO_OUTPUT, &video->mutex); return 0; } - diff --git a/drivers/usb/host/xhci-histb.c b/drivers/usb/host/xhci-histb.c index 91ce97821de5..7c2047755083 100644 --- a/drivers/usb/host/xhci-histb.c +++ b/drivers/usb/host/xhci-histb.c @@ -164,16 +164,6 @@ static void xhci_histb_host_disable(struct xhci_hcd_histb *histb) clk_disable_unprepare(histb->bus_clk); }
-static void xhci_histb_quirks(struct device *dev, struct xhci_hcd *xhci) -{ - /* - * As of now platform drivers don't provide MSI support so we ensure - * here that the generic code does not try to make a pci_dev from our - * dev struct in order to setup MSI - */ - xhci->quirks |= XHCI_PLAT; -} - /* called during probe() after chip reset completes */ static int xhci_histb_setup(struct usb_hcd *hcd) { @@ -186,7 +176,7 @@ static int xhci_histb_setup(struct usb_hcd *hcd) return ret; }
- return xhci_gen_setup(hcd, xhci_histb_quirks); + return xhci_gen_setup(hcd, NULL); }
static const struct xhci_driver_overrides xhci_histb_overrides __initconst = { diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c index b60521e1a9a6..9a40da3b0064 100644 --- a/drivers/usb/host/xhci-mtk.c +++ b/drivers/usb/host/xhci-mtk.c @@ -418,12 +418,6 @@ static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci) struct usb_hcd *hcd = xhci_to_hcd(xhci); struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
- /* - * As of now platform drivers don't provide MSI support so we ensure - * here that the generic code does not try to make a pci_dev from our - * dev struct in order to setup MSI - */ - xhci->quirks |= XHCI_PLAT; xhci->quirks |= XHCI_MTK_HOST; /* * MTK host controller gives a spurious successful event after a diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index db9826c38b20..9540f0e48c21 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -108,9 +108,6 @@ static void xhci_cleanup_msix(struct xhci_hcd *xhci) struct usb_hcd *hcd = xhci_to_hcd(xhci); struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
- if (xhci->quirks & XHCI_PLAT) - return; - /* return if using legacy interrupt */ if (hcd->irq > 0) return; @@ -208,10 +205,6 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd) struct pci_dev *pdev; int ret;
- /* The xhci platform device has set up IRQs through usb_add_hcd. */ - if (xhci->quirks & XHCI_PLAT) - return 0; - pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); /* * Some Fresco Logic host controllers advertise MSI, but fail to diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index f36633fa8362..80da67a6c3bf 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -78,12 +78,7 @@ static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci) { struct xhci_plat_priv *priv = xhci_to_priv(xhci);
- /* - * As of now platform drivers don't provide MSI support so we ensure - * here that the generic code does not try to make a pci_dev from our - * dev struct in order to setup MSI - */ - xhci->quirks |= XHCI_PLAT | priv->quirks; + xhci->quirks |= priv->quirks; }
/* called during probe() after chip reset completes */ diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index d28fa892c286..07a319db5803 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -2662,7 +2662,6 @@ static void tegra_xhci_quirks(struct device *dev, struct xhci_hcd *xhci) { struct tegra_xusb *tegra = dev_get_drvdata(dev);
- xhci->quirks |= XHCI_PLAT; if (tegra && tegra->soc->lpm_support) xhci->quirks |= XHCI_LPM_SUPPORT; } diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 4474d540f6b4..0b1928851a2a 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1874,7 +1874,7 @@ struct xhci_hcd { #define XHCI_SPURIOUS_REBOOT BIT_ULL(13) #define XHCI_COMP_MODE_QUIRK BIT_ULL(14) #define XHCI_AVOID_BEI BIT_ULL(15) -#define XHCI_PLAT BIT_ULL(16) +#define XHCI_PLAT BIT_ULL(16) /* Deprecated */ #define XHCI_SLOW_SUSPEND BIT_ULL(17) #define XHCI_SPURIOUS_WAKEUP BIT_ULL(18) /* For controllers with a broken beyond repair streams implementation */ diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h index 25fc4120b618..b53420e874ac 100644 --- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h +++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h @@ -31,6 +31,7 @@ struct mlx5_vdpa_mr { struct list_head head; unsigned long num_directs; unsigned long num_klms; + /* state of dvq mr */ bool initialized;
/* serialize mkey creation and destruction */ @@ -121,6 +122,7 @@ int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *io int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb, unsigned int asid); void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev); +void mlx5_vdpa_destroy_mr_asid(struct mlx5_vdpa_dev *mvdev, unsigned int asid);
#define mlx5_vdpa_warn(__dev, format, ...) \ dev_warn((__dev)->mdev->device, "%s:%d:(pid %d) warning: " format, __func__, __LINE__, \ diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c index 03e543229791..5a1971fcd87b 100644 --- a/drivers/vdpa/mlx5/core/mr.c +++ b/drivers/vdpa/mlx5/core/mr.c @@ -489,60 +489,103 @@ static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr } }
-void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev) +static void _mlx5_vdpa_destroy_cvq_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid) +{ + if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid) + return; + + prune_iotlb(mvdev); +} + +static void _mlx5_vdpa_destroy_dvq_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid) { struct mlx5_vdpa_mr *mr = &mvdev->mr;
- mutex_lock(&mr->mkey_mtx); + if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid) + return; + if (!mr->initialized) - goto out; + return;
- prune_iotlb(mvdev); if (mr->user_mr) destroy_user_mr(mvdev, mr); else destroy_dma_mr(mvdev, mr);
mr->initialized = false; -out: +} + +void mlx5_vdpa_destroy_mr_asid(struct mlx5_vdpa_dev *mvdev, unsigned int asid) +{ + struct mlx5_vdpa_mr *mr = &mvdev->mr; + + mutex_lock(&mr->mkey_mtx); + + _mlx5_vdpa_destroy_dvq_mr(mvdev, asid); + _mlx5_vdpa_destroy_cvq_mr(mvdev, asid); + mutex_unlock(&mr->mkey_mtx); }
-static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, - struct vhost_iotlb *iotlb, unsigned int asid) +void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev) +{ + mlx5_vdpa_destroy_mr_asid(mvdev, mvdev->group2asid[MLX5_VDPA_CVQ_GROUP]); + mlx5_vdpa_destroy_mr_asid(mvdev, mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]); +} + +static int _mlx5_vdpa_create_cvq_mr(struct mlx5_vdpa_dev *mvdev, + struct vhost_iotlb *iotlb, + unsigned int asid) +{ + if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid) + return 0; + + return dup_iotlb(mvdev, iotlb); +} + +static int _mlx5_vdpa_create_dvq_mr(struct mlx5_vdpa_dev *mvdev, + struct vhost_iotlb *iotlb, + unsigned int asid) { struct mlx5_vdpa_mr *mr = &mvdev->mr; int err;
- if (mr->initialized) + if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid) return 0;
- if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) { - if (iotlb) - err = create_user_mr(mvdev, iotlb); - else - err = create_dma_mr(mvdev, mr); + if (mr->initialized) + return 0;
- if (err) - return err; - } + if (iotlb) + err = create_user_mr(mvdev, iotlb); + else + err = create_dma_mr(mvdev, mr);
- if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid) { - err = dup_iotlb(mvdev, iotlb); - if (err) - goto out_err; - } + if (err) + return err;
mr->initialized = true; + + return 0; +} + +static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, + struct vhost_iotlb *iotlb, unsigned int asid) +{ + int err; + + err = _mlx5_vdpa_create_dvq_mr(mvdev, iotlb, asid); + if (err) + return err; + + err = _mlx5_vdpa_create_cvq_mr(mvdev, iotlb, asid); + if (err) + goto out_err; + return 0;
out_err: - if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) { - if (iotlb) - destroy_user_mr(mvdev, mr); - else - destroy_dma_mr(mvdev, mr); - } + _mlx5_vdpa_destroy_dvq_mr(mvdev, asid);
return err; } diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index 279ac6a558d2..f18a9301ab94 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -2564,7 +2564,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, goto err_mr;
teardown_driver(ndev); - mlx5_vdpa_destroy_mr(mvdev); + mlx5_vdpa_destroy_mr_asid(mvdev, asid); err = mlx5_vdpa_create_mr(mvdev, iotlb, asid); if (err) goto err_mr; @@ -2580,7 +2580,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, return 0;
err_setup: - mlx5_vdpa_destroy_mr(mvdev); + mlx5_vdpa_destroy_mr_asid(mvdev, asid); err_mr: return err; } diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c index 965e32529eb8..a7612e0783b3 100644 --- a/drivers/vdpa/vdpa.c +++ b/drivers/vdpa/vdpa.c @@ -1247,44 +1247,41 @@ static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = { [VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING }, [VDPA_ATTR_DEV_NAME] = { .type = NLA_STRING }, [VDPA_ATTR_DEV_NET_CFG_MACADDR] = NLA_POLICY_ETH_ADDR, + [VDPA_ATTR_DEV_NET_CFG_MAX_VQP] = { .type = NLA_U16 }, /* virtio spec 1.1 section 5.1.4.1 for valid MTU range */ [VDPA_ATTR_DEV_NET_CFG_MTU] = NLA_POLICY_MIN(NLA_U16, 68), + [VDPA_ATTR_DEV_QUEUE_INDEX] = { .type = NLA_U32 }, + [VDPA_ATTR_DEV_FEATURES] = { .type = NLA_U64 }, };
static const struct genl_ops vdpa_nl_ops[] = { { .cmd = VDPA_CMD_MGMTDEV_GET, - .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = vdpa_nl_cmd_mgmtdev_get_doit, .dumpit = vdpa_nl_cmd_mgmtdev_get_dumpit, }, { .cmd = VDPA_CMD_DEV_NEW, - .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = vdpa_nl_cmd_dev_add_set_doit, .flags = GENL_ADMIN_PERM, }, { .cmd = VDPA_CMD_DEV_DEL, - .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = vdpa_nl_cmd_dev_del_set_doit, .flags = GENL_ADMIN_PERM, }, { .cmd = VDPA_CMD_DEV_GET, - .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = vdpa_nl_cmd_dev_get_doit, .dumpit = vdpa_nl_cmd_dev_get_dumpit, }, { .cmd = VDPA_CMD_DEV_CONFIG_GET, - .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = vdpa_nl_cmd_dev_config_get_doit, .dumpit = vdpa_nl_cmd_dev_config_get_dumpit, }, { .cmd = VDPA_CMD_DEV_VSTATS_GET, - .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = vdpa_nl_cmd_dev_stats_get_doit, .flags = GENL_ADMIN_PERM, }, diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c index 0d84e6a9c3cc..76d4ab451f59 100644 --- a/drivers/vdpa/vdpa_user/vduse_dev.c +++ b/drivers/vdpa/vdpa_user/vduse_dev.c @@ -935,10 +935,10 @@ static void vduse_dev_irq_inject(struct work_struct *work) { struct vduse_dev *dev = container_of(work, struct vduse_dev, inject);
- spin_lock_irq(&dev->irq_lock); + spin_lock_bh(&dev->irq_lock); if (dev->config_cb.callback) dev->config_cb.callback(dev->config_cb.private); - spin_unlock_irq(&dev->irq_lock); + spin_unlock_bh(&dev->irq_lock); }
static void vduse_vq_irq_inject(struct work_struct *work) @@ -946,10 +946,10 @@ static void vduse_vq_irq_inject(struct work_struct *work) struct vduse_virtqueue *vq = container_of(work, struct vduse_virtqueue, inject);
- spin_lock_irq(&vq->irq_lock); + spin_lock_bh(&vq->irq_lock); if (vq->ready && vq->cb.callback) vq->cb.callback(vq->cb.private); - spin_unlock_irq(&vq->irq_lock); + spin_unlock_bh(&vq->irq_lock); }
static bool vduse_vq_signal_irqfd(struct vduse_virtqueue *vq) diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c index 51fbf02a0343..76b50b6c98ad 100644 --- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c +++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c @@ -519,7 +519,9 @@ static int mmphw_probe(struct platform_device *pdev) "unable to get clk %s\n", mi->clk_name); goto failed; } - clk_prepare_enable(ctrl->clk); + ret = clk_prepare_enable(ctrl->clk); + if (ret) + goto failed;
/* init global regs */ ctrl_set_default(ctrl); diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index a46a4a29e929..97760f611295 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -607,9 +607,8 @@ static void virtio_mmio_release_dev(struct device *_d) struct virtio_device *vdev = container_of(_d, struct virtio_device, dev); struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); - struct platform_device *pdev = vm_dev->pdev;
- devm_kfree(&pdev->dev, vm_dev); + kfree(vm_dev); }
/* Platform device */ @@ -620,7 +619,7 @@ static int virtio_mmio_probe(struct platform_device *pdev) unsigned long magic; int rc;
- vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL); + vm_dev = kzalloc(sizeof(*vm_dev), GFP_KERNEL); if (!vm_dev) return -ENOMEM;
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c index 989e2d7184ce..961161da5900 100644 --- a/drivers/virtio/virtio_vdpa.c +++ b/drivers/virtio/virtio_vdpa.c @@ -393,11 +393,13 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs, cb.callback = virtio_vdpa_config_cb; cb.private = vd_dev; ops->set_config_cb(vdpa, &cb); + kfree(masks);
return 0;
err_setup_vq: virtio_vdpa_del_vqs(vdev); + kfree(masks); return err; }
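The virtio_vdpa change frees the temporary masks allocation on the success path as well as the error path. The general shape, as a minimal sketch with hypothetical names (not from the driver):

#include <linux/slab.h>

static int setup_with_scratch(void)
{
        int *scratch;
        int err = 0;

        scratch = kcalloc(8, sizeof(*scratch), GFP_KERNEL);
        if (!scratch)
                return -ENOMEM;

        /* ... use scratch while configuring the device; may set err ... */

        kfree(scratch);         /* released on every exit path */
        return err;
}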
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c index 14f8d8d90920..2bd3dc25cb03 100644 --- a/drivers/watchdog/sp5100_tco.c +++ b/drivers/watchdog/sp5100_tco.c @@ -96,7 +96,7 @@ static enum tco_reg_layout tco_reg_layout(struct pci_dev *dev) sp5100_tco_pci->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS && sp5100_tco_pci->revision >= AMD_ZEN_SMBUS_PCI_REV) { return efch_mmio; - } else if (dev->vendor == PCI_VENDOR_ID_AMD && + } else if ((dev->vendor == PCI_VENDOR_ID_AMD || dev->vendor == PCI_VENDOR_ID_HYGON) && ((dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS && dev->revision >= 0x41) || (dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS && @@ -579,6 +579,8 @@ static const struct pci_device_id sp5100_tco_pci_tbl[] = { PCI_ANY_ID, }, { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, PCI_ANY_ID, + PCI_ANY_ID, }, { 0, }, /* End of list */ }; MODULE_DEVICE_TABLE(pci, sp5100_tco_pci_tbl); diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 2a60033d907b..a250afa655d5 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -1670,6 +1670,10 @@ void btrfs_mark_bg_unused(struct btrfs_block_group *bg) btrfs_get_block_group(bg); trace_btrfs_add_unused_block_group(bg); list_add_tail(&bg->bg_list, &fs_info->unused_bgs); + } else if (!test_bit(BLOCK_GROUP_FLAG_NEW, &bg->runtime_flags)) { + /* Pull out the block group from the reclaim_bgs list. */ + trace_btrfs_add_unused_block_group(bg); + list_move_tail(&bg->bg_list, &fs_info->unused_bgs); } spin_unlock(&fs_info->unused_bgs_lock); } @@ -2693,6 +2697,7 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) next: btrfs_delayed_refs_rsv_release(fs_info, 1); list_del_init(&block_group->bg_list); + clear_bit(BLOCK_GROUP_FLAG_NEW, &block_group->runtime_flags); } btrfs_trans_release_chunk_metadata(trans); } @@ -2732,6 +2737,13 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran if (!cache) return ERR_PTR(-ENOMEM);
+ /* + * Mark it as new before adding it to the rbtree of block groups or any + * list, so that no other task finds it and calls btrfs_mark_bg_unused() + * before the new flag is set. + */ + set_bit(BLOCK_GROUP_FLAG_NEW, &cache->runtime_flags); + cache->length = size; set_free_space_tree_thresholds(cache); cache->flags = type; diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index 471f591db7c0..0852f6c101f8 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -70,6 +70,11 @@ enum btrfs_block_group_flags { BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, /* Indicate that the block group is placed on a sequential zone */ BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, + /* + * Indicate that block group is in the list of new block groups of a + * transaction. + */ + BLOCK_GROUP_FLAG_NEW, };
enum btrfs_caching_type { diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 4c1986cd5bed..cff98526679e 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -443,6 +443,7 @@ struct btrfs_drop_extents_args {
struct btrfs_file_private { void *filldir_buf; + u64 last_index; struct extent_state *llseek_cached_state; };
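The new last_index field records, at opendir time, an upper bound on the directory index values that a given open file will ever report; the readdir paths below stop at that bound, so entries created concurrently cannot keep the iteration from terminating. A minimal sketch of the idea (simplified, hypothetical types, not btrfs code):

struct ro_dir_iter {
        u64 pos;                /* current readdir position */
        u64 last_index;         /* highest index visible to this open */
};

static bool ro_dir_wants(const struct ro_dir_iter *it, u64 index)
{
        /* Entries added after the directory was opened are skipped in
         * this pass; a later opendir/readdir will see them. */
        return index <= it->last_index;
}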
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 6b457b010cbc..6d51db066503 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -1632,6 +1632,7 @@ int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode) }
bool btrfs_readdir_get_delayed_items(struct inode *inode, + u64 last_index, struct list_head *ins_list, struct list_head *del_list) { @@ -1651,14 +1652,14 @@ bool btrfs_readdir_get_delayed_items(struct inode *inode,
mutex_lock(&delayed_node->mutex); item = __btrfs_first_delayed_insertion_item(delayed_node); - while (item) { + while (item && item->index <= last_index) { refcount_inc(&item->refs); list_add_tail(&item->readdir_list, ins_list); item = __btrfs_next_delayed_item(item); }
item = __btrfs_first_delayed_deletion_item(delayed_node); - while (item) { + while (item && item->index <= last_index) { refcount_inc(&item->refs); list_add_tail(&item->readdir_list, del_list); item = __btrfs_next_delayed_item(item); diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h index 4f21daa3dbc7..dc1085b2a397 100644 --- a/fs/btrfs/delayed-inode.h +++ b/fs/btrfs/delayed-inode.h @@ -148,6 +148,7 @@ void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info);
/* Used for readdir() */ bool btrfs_readdir_get_delayed_items(struct inode *inode, + u64 last_index, struct list_head *ins_list, struct list_head *del_list); void btrfs_readdir_put_delayed_items(struct inode *inode, diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 54eed5a8a412..00f260c8bd60 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -962,7 +962,30 @@ static void submit_extent_page(struct btrfs_bio_ctrl *bio_ctrl, size -= len; pg_offset += len; disk_bytenr += len; - bio_ctrl->len_to_oe_boundary -= len; + + /* + * len_to_oe_boundary defaults to U32_MAX, which isn't page or + * sector aligned. alloc_new_bio() then sets it to the end of + * our ordered extent for writes into zoned devices. + * + * When len_to_oe_boundary is tracking an ordered extent, we + * trust the ordered extent code to align things properly, and + * the check above to cap our write to the ordered extent + * boundary is correct. + * + * When len_to_oe_boundary is U32_MAX, the cap above would + * result in a 4095 byte IO for the last page right before + * we hit the bio limit of UINT_MAX. bio_add_page() has all + * the checks required to make sure we don't overflow the bio, + * and we should just ignore len_to_oe_boundary completely + * unless we're using it to track an ordered extent. + * + * It's pretty hard to make a bio sized U32_MAX, but it can + * happen when the page cache is able to feed us contiguous + * pages for large extents. + */ + if (bio_ctrl->len_to_oe_boundary != U32_MAX) + bio_ctrl->len_to_oe_boundary -= len;
/* Ordered extent boundary: move on to a new bio. */ if (bio_ctrl->len_to_oe_boundary == 0) diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 138afa955370..367ed73cb6c7 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -758,8 +758,6 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) { start = em_end; - if (end != (u64)-1) - len = start + len - em_end; goto next; }
@@ -827,8 +825,8 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end, if (!split) goto remove_em; } - split->start = start + len; - split->len = em_end - (start + len); + split->start = end; + split->len = em_end - end; split->block_start = em->block_start; split->flags = flags; split->compress_type = em->compress_type; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index ace949bc7505..a446965d701d 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -5744,6 +5744,74 @@ static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, return d_splice_alias(inode, dentry); }
+/* + * Find the highest existing sequence number in a directory and then set the + * in-memory index_cnt variable to the first free sequence number. + */ +static int btrfs_set_inode_index_count(struct btrfs_inode *inode) +{ + struct btrfs_root *root = inode->root; + struct btrfs_key key, found_key; + struct btrfs_path *path; + struct extent_buffer *leaf; + int ret; + + key.objectid = btrfs_ino(inode); + key.type = BTRFS_DIR_INDEX_KEY; + key.offset = (u64)-1; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + goto out; + /* FIXME: we should be able to handle this */ + if (ret == 0) + goto out; + ret = 0; + + if (path->slots[0] == 0) { + inode->index_cnt = BTRFS_DIR_START_INDEX; + goto out; + } + + path->slots[0]--; + + leaf = path->nodes[0]; + btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); + + if (found_key.objectid != btrfs_ino(inode) || + found_key.type != BTRFS_DIR_INDEX_KEY) { + inode->index_cnt = BTRFS_DIR_START_INDEX; + goto out; + } + + inode->index_cnt = found_key.offset + 1; +out: + btrfs_free_path(path); + return ret; +} + +static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index) +{ + if (dir->index_cnt == (u64)-1) { + int ret; + + ret = btrfs_inode_delayed_dir_index_count(dir); + if (ret) { + ret = btrfs_set_inode_index_count(dir); + if (ret) + return ret; + } + } + + *index = dir->index_cnt; + + return 0; +} + /* * All this infrastructure exists because dir_emit can fault, and we are holding * the tree lock when doing readdir. For now just allocate a buffer and copy @@ -5756,10 +5824,17 @@ static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, static int btrfs_opendir(struct inode *inode, struct file *file) { struct btrfs_file_private *private; + u64 last_index; + int ret; + + ret = btrfs_get_dir_last_index(BTRFS_I(inode), &last_index); + if (ret) + return ret;
private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL); if (!private) return -ENOMEM; + private->last_index = last_index; private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!private->filldir_buf) { kfree(private); @@ -5826,7 +5901,8 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
INIT_LIST_HEAD(&ins_list); INIT_LIST_HEAD(&del_list); - put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list); + put = btrfs_readdir_get_delayed_items(inode, private->last_index, + &ins_list, &del_list);
again: key.type = BTRFS_DIR_INDEX_KEY; @@ -5844,6 +5920,8 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) break; if (found_key.offset < ctx->pos) continue; + if (found_key.offset > private->last_index) + break; if (btrfs_should_delete_dir_index(&del_list, found_key.offset)) continue; di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); @@ -5979,57 +6057,6 @@ static int btrfs_update_time(struct inode *inode, struct timespec64 *now, return dirty ? btrfs_dirty_inode(BTRFS_I(inode)) : 0; }
-/* - * find the highest existing sequence number in a directory - * and then set the in-memory index_cnt variable to reflect - * free sequence numbers - */ -static int btrfs_set_inode_index_count(struct btrfs_inode *inode) -{ - struct btrfs_root *root = inode->root; - struct btrfs_key key, found_key; - struct btrfs_path *path; - struct extent_buffer *leaf; - int ret; - - key.objectid = btrfs_ino(inode); - key.type = BTRFS_DIR_INDEX_KEY; - key.offset = (u64)-1; - - path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; - - ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); - if (ret < 0) - goto out; - /* FIXME: we should be able to handle this */ - if (ret == 0) - goto out; - ret = 0; - - if (path->slots[0] == 0) { - inode->index_cnt = BTRFS_DIR_START_INDEX; - goto out; - } - - path->slots[0]--; - - leaf = path->nodes[0]; - btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - - if (found_key.objectid != btrfs_ino(inode) || - found_key.type != BTRFS_DIR_INDEX_KEY) { - inode->index_cnt = BTRFS_DIR_START_INDEX; - goto out; - } - - inode->index_cnt = found_key.offset + 1; -out: - btrfs_free_path(path); - return ret; -} - /* * helper to find a free sequence number in a given directory. This current * code is very simple, later versions will do smarter things in the btree diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 16c228344cbb..2feb7f229423 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -655,7 +655,8 @@ static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr btrfs_stack_header_bytenr(header), logical); return; } - if (memcmp(header->fsid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE) != 0) { + if (memcmp(header->fsid, fs_info->fs_devices->metadata_uuid, + BTRFS_FSID_SIZE) != 0) { bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); btrfs_warn_rl(fs_info, diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 436e15e3759d..30977f10e36b 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -4631,8 +4631,7 @@ int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) } }
- BUG_ON(fs_info->balance_ctl || - test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); + ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); atomic_dec(&fs_info->balance_cancel_req); mutex_unlock(&fs_info->balance_mutex); return 0; diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 83c4abff496d..5fb367b1d4b0 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -645,6 +645,7 @@ static int parse_reply_info(struct ceph_mds_session *s, struct ceph_msg *msg, err = -EIO; out_bad: pr_err("mds parse_reply err %d\n", err); + ceph_msg_dump(msg); return err; }
@@ -3538,6 +3539,7 @@ static void handle_forward(struct ceph_mds_client *mdsc,
bad: pr_err("mdsc_handle_forward decode error err=%d\n", err); + ceph_msg_dump(msg); }
static int __decode_session_metadata(void **p, void *end, @@ -5258,6 +5260,7 @@ void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg) bad: pr_err("error decoding fsmap %d. Shutting down mount.\n", err); ceph_umount_begin(mdsc->fsc->sb); + ceph_msg_dump(msg); err_out: mutex_lock(&mdsc->mutex); mdsc->mdsmap_err = err; @@ -5326,6 +5329,7 @@ void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg) bad: pr_err("error decoding mdsmap %d. Shutting down mount.\n", err); ceph_umount_begin(mdsc->fsc->sb); + ceph_msg_dump(msg); return; }
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index a84bf6444bba..204ba7f8417e 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -1004,7 +1004,14 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root) { struct gfs2_sbd *sdp = root->d_sb->s_fs_info; struct gfs2_args *args = &sdp->sd_args; - int val; + unsigned int logd_secs, statfs_slow, statfs_quantum, quota_quantum; + + spin_lock(&sdp->sd_tune.gt_spin); + logd_secs = sdp->sd_tune.gt_logd_secs; + quota_quantum = sdp->sd_tune.gt_quota_quantum; + statfs_quantum = sdp->sd_tune.gt_statfs_quantum; + statfs_slow = sdp->sd_tune.gt_statfs_slow; + spin_unlock(&sdp->sd_tune.gt_spin);
if (is_ancestor(root, sdp->sd_master_dir)) seq_puts(s, ",meta"); @@ -1059,17 +1066,14 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root) } if (args->ar_discard) seq_puts(s, ",discard"); - val = sdp->sd_tune.gt_logd_secs; - if (val != 30) - seq_printf(s, ",commit=%d", val); - val = sdp->sd_tune.gt_statfs_quantum; - if (val != 30) - seq_printf(s, ",statfs_quantum=%d", val); - else if (sdp->sd_tune.gt_statfs_slow) + if (logd_secs != 30) + seq_printf(s, ",commit=%d", logd_secs); + if (statfs_quantum != 30) + seq_printf(s, ",statfs_quantum=%d", statfs_quantum); + else if (statfs_slow) seq_puts(s, ",statfs_quantum=0"); - val = sdp->sd_tune.gt_quota_quantum; - if (val != 60) - seq_printf(s, ",quota_quantum=%d", val); + if (quota_quantum != 60) + seq_printf(s, ",quota_quantum=%d", quota_quantum); if (args->ar_statfs_percent) seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent); if (args->ar_errors != GFS2_ERRORS_DEFAULT) { diff --git a/fs/netfs/iterator.c b/fs/netfs/iterator.c index 8a4c86687429..facb84f262dc 100644 --- a/fs/netfs/iterator.c +++ b/fs/netfs/iterator.c @@ -151,7 +151,7 @@ static ssize_t netfs_extract_user_to_sg(struct iov_iter *iter,
failed: while (sgtable->nents > sgtable->orig_nents) - put_page(sg_page(&sgtable->sgl[--sgtable->nents])); + unpin_user_page(sg_page(&sgtable->sgl[--sgtable->nents])); return res; }
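A note on the netfs_extract_user_to_sg() failure path above: the release primitive has to match how the pages were obtained. Pages extracted from a user-backed iov_iter are pinned, so the cleanup must drop them with unpin_user_page() rather than put_page(), otherwise the pin count is left unbalanced. A minimal sketch of the pairing (the helper name is made up for illustration and assumes every entry past 'keep' holds one pinned page):

    static void release_extracted_pages(struct sg_table *sgt, unsigned int keep)
    {
            /* Walk back over the appended entries and drop their pins. */
            while (sgt->nents > keep)
                    unpin_user_page(sg_page(&sgt->sgl[--sgt->nents]));
    }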
diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c index 2bfcf1a989c9..50214b77c6a3 100644 --- a/fs/ntfs3/frecord.c +++ b/fs/ntfs3/frecord.c @@ -874,6 +874,7 @@ int ni_create_attr_list(struct ntfs_inode *ni) if (err) goto out1;
+ err = -EINVAL; /* Call mi_remove_attr() in reverse order to keep pointers 'arr_move' valid. */ while (to_free > 0) { struct ATTRIB *b = arr_move[--nb]; @@ -882,7 +883,8 @@ int ni_create_attr_list(struct ntfs_inode *ni)
attr = mi_insert_attr(mi, b->type, Add2Ptr(b, name_off), b->name_len, asize, name_off); - WARN_ON(!attr); + if (!attr) + goto out1;
mi_get_ref(mi, &le_b[nb]->ref); le_b[nb]->id = attr->id; @@ -892,17 +894,20 @@ int ni_create_attr_list(struct ntfs_inode *ni) attr->id = le_b[nb]->id;
/* Remove from primary record. */ - WARN_ON(!mi_remove_attr(NULL, &ni->mi, b)); + if (!mi_remove_attr(NULL, &ni->mi, b)) + goto out1;
if (to_free <= asize) break; to_free -= asize; - WARN_ON(!nb); + if (!nb) + goto out1; }
attr = mi_insert_attr(&ni->mi, ATTR_LIST, NULL, 0, lsize + SIZEOF_RESIDENT, SIZEOF_RESIDENT); - WARN_ON(!attr); + if (!attr) + goto out1;
attr->non_res = 0; attr->flags = 0; @@ -922,9 +927,10 @@ int ni_create_attr_list(struct ntfs_inode *ni) kfree(ni->attr_list.le); ni->attr_list.le = NULL; ni->attr_list.size = 0; + return err;
out: - return err; + return 0; }
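A note on the ni_create_attr_list() changes above: the old code wrapped calls whose result matters in WARN_ON(), which logs the failure and then carries on, dereferencing the NULL result a few lines later. The fix applies the usual pattern, sketched here with a made-up helper name:

    /* Before: failure is only warned about, then ignored. */
    WARN_ON(!do_step(x));

    /* After: failure is actually handled. */
    if (!do_step(x))
            goto out1;      /* unwind and return the error */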
/* diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c index 28cc421102e5..21567e58265c 100644 --- a/fs/ntfs3/fsntfs.c +++ b/fs/ntfs3/fsntfs.c @@ -178,7 +178,7 @@ int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes, /* Check errors. */ if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- || fn * SECTOR_SIZE > bytes) { - return -EINVAL; /* Native chkntfs returns ok! */ + return -E_NTFS_CORRUPT; }
/* Get fixup pointer. */ diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c index 0a48d2d67219..b40da258e684 100644 --- a/fs/ntfs3/index.c +++ b/fs/ntfs3/index.c @@ -1113,6 +1113,12 @@ int indx_read(struct ntfs_index *indx, struct ntfs_inode *ni, CLST vbn, *node = in;
out: + if (err == -E_NTFS_CORRUPT) { + ntfs_inode_err(&ni->vfs_inode, "directory corrupted"); + ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR); + err = -EINVAL; + } + if (ib != in->index) kfree(ib);
diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h index eb01f7e76479..2e4be773728d 100644 --- a/fs/ntfs3/ntfs_fs.h +++ b/fs/ntfs3/ntfs_fs.h @@ -53,6 +53,8 @@ enum utf16_endian; #define E_NTFS_NONRESIDENT 556 /* NTFS specific error code about punch hole. */ #define E_NTFS_NOTALIGNED 557 +/* NTFS specific error code when on-disk struct is corrupted. */ +#define E_NTFS_CORRUPT 558
/* sbi->flags */ diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c index 2a281cead2bc..7974ca35a15c 100644 --- a/fs/ntfs3/record.c +++ b/fs/ntfs3/record.c @@ -124,7 +124,7 @@ int mi_read(struct mft_inode *mi, bool is_mft) struct rw_semaphore *rw_lock = NULL;
if (is_mounted(sbi)) { - if (!is_mft) { + if (!is_mft && mft_ni) { rw_lock = &mft_ni->file.run_lock; down_read(rw_lock); } @@ -148,7 +148,7 @@ int mi_read(struct mft_inode *mi, bool is_mft) ni_lock(mft_ni); down_write(rw_lock); } - err = attr_load_runs_vcn(mft_ni, ATTR_DATA, NULL, 0, &mft_ni->file.run, + err = attr_load_runs_vcn(mft_ni, ATTR_DATA, NULL, 0, run, vbo >> sbi->cluster_bits); if (rw_lock) { up_write(rw_lock); @@ -180,6 +180,12 @@ int mi_read(struct mft_inode *mi, bool is_mft) return 0;
out: + if (err == -E_NTFS_CORRUPT) { + ntfs_err(sbi->sb, "mft corrupted"); + ntfs_set_state(sbi, NTFS_DIRTY_ERROR); + err = -EINVAL; + } + return err; }
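For context on the E_NTFS_CORRUPT plumbing: the low-level readers now return this driver-private code so that callers such as indx_read() and mi_read() can tell on-disk corruption apart from ordinary I/O errors, flag the volume as having errors, and only then hand a plain -EINVAL back to the rest of the kernel. The boundary translation is simply:

    if (err == -E_NTFS_CORRUPT) {
            ntfs_set_state(sbi, NTFS_DIRTY_ERROR); /* record that the volume has errors */
            err = -EINVAL;                         /* what the VFS and userspace see */
    }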
diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c index 5158dd31fd97..ecf899d571d8 100644 --- a/fs/ntfs3/super.c +++ b/fs/ntfs3/super.c @@ -724,6 +724,8 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size, struct MFT_REC *rec; u16 fn, ao; u8 cluster_bits; + u32 boot_off = 0; + const char *hint = "Primary boot";
sbi->volume.blocks = dev_size >> PAGE_SHIFT;
@@ -731,11 +733,12 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size, if (!bh) return -EIO;
+check_boot: err = -EINVAL; - boot = (struct NTFS_BOOT *)bh->b_data; + boot = (struct NTFS_BOOT *)Add2Ptr(bh->b_data, boot_off);
if (memcmp(boot->system_id, "NTFS ", sizeof("NTFS ") - 1)) { - ntfs_err(sb, "Boot's signature is not NTFS."); + ntfs_err(sb, "%s signature is not NTFS.", hint); goto out; }
@@ -748,14 +751,16 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size, boot->bytes_per_sector[0]; if (boot_sector_size < SECTOR_SIZE || !is_power_of_2(boot_sector_size)) { - ntfs_err(sb, "Invalid bytes per sector %u.", boot_sector_size); + ntfs_err(sb, "%s: invalid bytes per sector %u.", hint, + boot_sector_size); goto out; }
/* cluster size: 512, 1K, 2K, 4K, ... 2M */ sct_per_clst = true_sectors_per_clst(boot); if ((int)sct_per_clst < 0 || !is_power_of_2(sct_per_clst)) { - ntfs_err(sb, "Invalid sectors per cluster %u.", sct_per_clst); + ntfs_err(sb, "%s: invalid sectors per cluster %u.", hint, + sct_per_clst); goto out; }
@@ -771,8 +776,8 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size, if (mlcn * sct_per_clst >= sectors || mlcn2 * sct_per_clst >= sectors) { ntfs_err( sb, - "Start of MFT 0x%llx (0x%llx) is out of volume 0x%llx.", - mlcn, mlcn2, sectors); + "%s: start of MFT 0x%llx (0x%llx) is out of volume 0x%llx.", + hint, mlcn, mlcn2, sectors); goto out; }
@@ -784,7 +789,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
/* Check MFT record size. */ if (record_size < SECTOR_SIZE || !is_power_of_2(record_size)) { - ntfs_err(sb, "Invalid bytes per MFT record %u (%d).", + ntfs_err(sb, "%s: invalid bytes per MFT record %u (%d).", hint, record_size, boot->record_size); goto out; } @@ -801,13 +806,13 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
/* Check index record size. */ if (sbi->index_size < SECTOR_SIZE || !is_power_of_2(sbi->index_size)) { - ntfs_err(sb, "Invalid bytes per index %u(%d).", sbi->index_size, - boot->index_size); + ntfs_err(sb, "%s: invalid bytes per index %u(%d).", hint, + sbi->index_size, boot->index_size); goto out; }
if (sbi->index_size > MAXIMUM_BYTES_PER_INDEX) { - ntfs_err(sb, "Unsupported bytes per index %u.", + ntfs_err(sb, "%s: unsupported bytes per index %u.", hint, sbi->index_size); goto out; } @@ -834,7 +839,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
/* Compare boot's cluster and sector. */ if (sbi->cluster_size < boot_sector_size) { - ntfs_err(sb, "Invalid bytes per cluster (%u).", + ntfs_err(sb, "%s: invalid bytes per cluster (%u).", hint, sbi->cluster_size); goto out; } @@ -930,7 +935,46 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
err = 0;
+ if (bh->b_blocknr && !sb_rdonly(sb)) { + /* + * Alternative boot is ok but primary is not ok. + * Update primary boot. + */ + struct buffer_head *bh0 = sb_getblk(sb, 0); + if (bh0) { + if (buffer_locked(bh0)) + __wait_on_buffer(bh0); + + lock_buffer(bh0); + memcpy(bh0->b_data, boot, sizeof(*boot)); + set_buffer_uptodate(bh0); + mark_buffer_dirty(bh0); + unlock_buffer(bh0); + if (!sync_dirty_buffer(bh0)) + ntfs_warn(sb, "primary boot is updated"); + put_bh(bh0); + } + } + out: + if (err == -EINVAL && !bh->b_blocknr && dev_size > PAGE_SHIFT) { + u32 block_size = min_t(u32, sector_size, PAGE_SIZE); + u64 lbo = dev_size - sizeof(*boot); + + /* + * Try alternative boot (last sector) + */ + brelse(bh); + + sb_set_blocksize(sb, block_size); + bh = ntfs_bread(sb, lbo >> blksize_bits(block_size)); + if (!bh) + return -EINVAL; + + boot_off = lbo & (block_size - 1); + hint = "Alternative boot"; + goto check_boot; + } brelse(bh);
return err; @@ -955,6 +999,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc) struct ATTR_DEF_ENTRY *t; u16 *shared; struct MFT_REF ref; + bool ro = sb_rdonly(sb);
ref.high = 0;
@@ -1035,6 +1080,10 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc) sbi->volume.minor_ver = info->minor_ver; sbi->volume.flags = info->flags; sbi->volume.ni = ni; + if (info->flags & VOLUME_FLAG_DIRTY) { + sbi->volume.real_dirty = true; + ntfs_info(sb, "It is recommened to use chkdsk."); + }
/* Load $MFTMirr to estimate recs_mirr. */ ref.low = cpu_to_le32(MFT_REC_MIRR); @@ -1069,21 +1118,16 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
iput(inode);
- if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) { - if (!sb_rdonly(sb)) { - ntfs_warn(sb, - "failed to replay log file. Can't mount rw!"); - err = -EINVAL; - goto out; - } - } else if (sbi->volume.flags & VOLUME_FLAG_DIRTY) { - if (!sb_rdonly(sb) && !options->force) { - ntfs_warn( - sb, - "volume is dirty and \"force\" flag is not set!"); - err = -EINVAL; - goto out; - } + if ((sbi->flags & NTFS_FLAGS_NEED_REPLAY) && !ro) { + ntfs_warn(sb, "failed to replay log file. Can't mount rw!"); + err = -EINVAL; + goto out; + } + + if ((sbi->volume.flags & VOLUME_FLAG_DIRTY) && !ro && !options->force) { + ntfs_warn(sb, "volume is dirty and \"force\" flag is not set!"); + err = -EINVAL; + goto out; + }
/* Load $MFT. */ @@ -1173,7 +1217,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
bad_len += len; bad_frags += 1; - if (sb_rdonly(sb)) + if (ro) continue;
if (wnd_set_used_safe(&sbi->used.bitmap, lcn, len, &tt) || tt) { diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c index fd02fcf4d409..26787c2bbf75 100644 --- a/fs/ntfs3/xattr.c +++ b/fs/ntfs3/xattr.c @@ -141,6 +141,7 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
memset(Add2Ptr(ea_p, size), 0, add_bytes);
+ err = -EINVAL; /* Check all attributes for consistency. */ for (off = 0; off < size; off += ea_size) { const struct EA_FULL *ef = Add2Ptr(ea_p, off); diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c index ed0f71137584..d14e88e14fb2 100644 --- a/fs/smb/client/cifs_debug.c +++ b/fs/smb/client/cifs_debug.c @@ -153,6 +153,11 @@ cifs_dump_channel(struct seq_file *m, int i, struct cifs_chan *chan) in_flight(server), atomic_read(&server->in_send), atomic_read(&server->num_waiters)); +#ifdef CONFIG_NET_NS + if (server->net) + seq_printf(m, " Net namespace: %u ", server->net->ns.inum); +#endif /* NET_NS */ + }
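For reference, the namespace identifier printed above (server->net->ns.inum) is the namespace's inode number, the same value userspace sees when resolving a task's namespace link, e.g. readlink /proc/self/ns/net prints net:[<inum>]. That makes it straightforward to check from the cifs DebugData output whether a session's TCP connection lives in the network namespace the mount was created in.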
static inline const char *smb_speed_to_str(size_t bps) @@ -429,10 +434,15 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) server->reconnect_instance, server->srv_count, server->sec_mode, in_flight(server)); +#ifdef CONFIG_NET_NS + if (server->net) + seq_printf(m, " Net namespace: %u ", server->net->ns.inum); +#endif /* NET_NS */
seq_printf(m, "\nIn Send: %d In MaxReq Wait: %d", atomic_read(&server->in_send), atomic_read(&server->num_waiters)); + if (server->leaf_fullpath) { seq_printf(m, "\nDFS leaf full path: %s", server->leaf_fullpath); diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c index 43a4d8603db3..30b03938f6d1 100644 --- a/fs/smb/client/cifsfs.c +++ b/fs/smb/client/cifsfs.c @@ -884,11 +884,11 @@ struct dentry * cifs_smb3_do_mount(struct file_system_type *fs_type, int flags, struct smb3_fs_context *old_ctx) { - int rc; - struct super_block *sb = NULL; - struct cifs_sb_info *cifs_sb = NULL; struct cifs_mnt_data mnt_data; + struct cifs_sb_info *cifs_sb; + struct super_block *sb; struct dentry *root; + int rc;
if (cifsFYI) { cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__, @@ -897,11 +897,9 @@ cifs_smb3_do_mount(struct file_system_type *fs_type, cifs_info("Attempting to mount %s\n", old_ctx->source); }
- cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL); - if (cifs_sb == NULL) { - root = ERR_PTR(-ENOMEM); - goto out; - } + cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL); + if (!cifs_sb) + return ERR_PTR(-ENOMEM);
cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL); if (!cifs_sb->ctx) { @@ -938,10 +936,8 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data); if (IS_ERR(sb)) { - root = ERR_CAST(sb); cifs_umount(cifs_sb); - cifs_sb = NULL; - goto out; + return ERR_CAST(sb); }
if (sb->s_root) { @@ -972,13 +968,9 @@ cifs_smb3_do_mount(struct file_system_type *fs_type, deactivate_locked_super(sb); return root; out: - if (cifs_sb) { - if (!sb || IS_ERR(sb)) { /* otherwise kill_sb will handle */ - kfree(cifs_sb->prepath); - smb3_cleanup_fs_context(cifs_sb->ctx); - kfree(cifs_sb); - } - } + kfree(cifs_sb->prepath); + smb3_cleanup_fs_context(cifs_sb->ctx); + kfree(cifs_sb); return root; }
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c index d554bca7e07e..855454ff6ced 100644 --- a/fs/smb/client/file.c +++ b/fs/smb/client/file.c @@ -4681,9 +4681,9 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
io_error: kunmap(page); - unlock_page(page);
read_complete: + unlock_page(page); return rc; }
@@ -4878,9 +4878,11 @@ void cifs_oplock_break(struct work_struct *work) struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo, oplock_break); struct inode *inode = d_inode(cfile->dentry); + struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct cifsInodeInfo *cinode = CIFS_I(inode); - struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); - struct TCP_Server_Info *server = tcon->ses->server; + struct cifs_tcon *tcon; + struct TCP_Server_Info *server; + struct tcon_link *tlink; int rc = 0; bool purge_cache = false, oplock_break_cancelled; __u64 persistent_fid, volatile_fid; @@ -4889,6 +4891,12 @@ void cifs_oplock_break(struct work_struct *work) wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS, TASK_UNINTERRUPTIBLE);
+ tlink = cifs_sb_tlink(cifs_sb); + if (IS_ERR(tlink)) + goto out; + tcon = tlink_tcon(tlink); + server = tcon->ses->server; + server->ops->downgrade_oplock(server, cinode, cfile->oplock_level, cfile->oplock_epoch, &purge_cache);
@@ -4938,18 +4946,19 @@ void cifs_oplock_break(struct work_struct *work) /* * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require * an acknowledgment to be sent when the file has already been closed. - * check for server null, since can race with kill_sb calling tree disconnect. */ spin_lock(&cinode->open_file_lock); - if (tcon->ses && tcon->ses->server && !oplock_break_cancelled && - !list_empty(&cinode->openFileList)) { + /* check list empty since can race with kill_sb calling tree disconnect */ + if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) { spin_unlock(&cinode->open_file_lock); - rc = tcon->ses->server->ops->oplock_response(tcon, persistent_fid, - volatile_fid, net_fid, cinode); + rc = server->ops->oplock_response(tcon, persistent_fid, + volatile_fid, net_fid, cinode); cifs_dbg(FYI, "Oplock release rc = %d\n", rc); } else spin_unlock(&cinode->open_file_lock);
+ cifs_put_tlink(tlink); +out: cifs_done_oplock_break(cinode); }
diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c index 4946a0c59600..67e16c2ac90e 100644 --- a/fs/smb/client/fs_context.c +++ b/fs/smb/client/fs_context.c @@ -231,6 +231,8 @@ cifs_parse_security_flavors(struct fs_context *fc, char *value, struct smb3_fs_c break; case Opt_sec_none: ctx->nullauth = 1; + kfree(ctx->username); + ctx->username = NULL; break; default: cifs_errorf(fc, "bad security option: %s\n", value); @@ -1201,6 +1203,8 @@ static int smb3_fs_context_parse_param(struct fs_context *fc, case Opt_user: kfree(ctx->username); ctx->username = NULL; + if (ctx->nullauth) + break; if (strlen(param->string) == 0) { /* null user, ie. anonymous authentication */ ctx->nullauth = 1; diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c index 17fe212ab895..e04766fe6f80 100644 --- a/fs/smb/client/smb2pdu.c +++ b/fs/smb/client/smb2pdu.c @@ -3797,6 +3797,12 @@ void smb2_reconnect_server(struct work_struct *work)
spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { + spin_lock(&ses->ses_lock); + if (ses->ses_status == SES_EXITING) { + spin_unlock(&ses->ses_lock); + continue; + } + spin_unlock(&ses->ses_lock);
tcon_selected = false;
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 571885d32907..70ae6c290bdc 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -61,15 +61,9 @@ struct std_timing { u8 vfreq_aspect; } __attribute__((packed));
-#define DRM_EDID_PT_SYNC_MASK (3 << 3) -# define DRM_EDID_PT_ANALOG_CSYNC (0 << 3) -# define DRM_EDID_PT_BIPOLAR_ANALOG_CSYNC (1 << 3) -# define DRM_EDID_PT_DIGITAL_CSYNC (2 << 3) -# define DRM_EDID_PT_CSYNC_ON_RGB (1 << 1) /* analog csync only */ -# define DRM_EDID_PT_CSYNC_SERRATE (1 << 2) -# define DRM_EDID_PT_DIGITAL_SEPARATE_SYNC (3 << 3) -# define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1) /* also digital csync */ -# define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2) +#define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1) +#define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2) +#define DRM_EDID_PT_SEPARATE_SYNC (3 << 3) #define DRM_EDID_PT_STEREO (1 << 5) #define DRM_EDID_PT_INTERLACED (1 << 7)
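For context on the simplified sync-flag macros: in the EDID detailed-timing flags byte, bits 4:3 select the sync scheme (both bits set means digital separate sync) and, for that scheme, bits 2 and 1 carry the vertical and horizontal sync polarities, which is all the parser needs here. A small decode sketch using only the remaining definitions (the function is illustrative, not part of the patch):

    static void edid_pt_sync_example(u8 flags)
    {
            if ((flags & DRM_EDID_PT_SEPARATE_SYNC) == DRM_EDID_PT_SEPARATE_SYNC) {
                    bool hsync_positive = flags & DRM_EDID_PT_HSYNC_POSITIVE;
                    bool vsync_positive = flags & DRM_EDID_PT_VSYNC_POSITIVE;
                    /* ... program the mode's sync polarities ... */
            }
    }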
diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h index 2c8860e406bd..0417360a6db9 100644 --- a/include/linux/iopoll.h +++ b/include/linux/iopoll.h @@ -53,6 +53,7 @@ } \ if (__sleep_us) \ usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ + cpu_relax(); \ } \ (cond) ? 0 : -ETIMEDOUT; \ }) @@ -95,6 +96,7 @@ } \ if (__delay_us) \ udelay(__delay_us); \ + cpu_relax(); \ } \ (cond) ? 0 : -ETIMEDOUT; \ }) diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index bdf8de2cdd93..7b4dd69555e4 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -155,6 +155,10 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, if (gso_type & SKB_GSO_UDP) nh_off -= thlen;
+ /* Kernel has a special handling for GSO_BY_FRAGS. */ + if (gso_size == GSO_BY_FRAGS) + return -EINVAL; + /* Too small packets are not really GSO ones. */ if (skb->len - nh_off > gso_size) { shinfo->gso_size = gso_size; diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h index bb9de6a899e0..d6c8eb2b5201 100644 --- a/include/media/v4l2-mem2mem.h +++ b/include/media/v4l2-mem2mem.h @@ -593,7 +593,14 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, static inline unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx) { - return m2m_ctx->out_q_ctx.num_rdy; + unsigned int num_buf_rdy; + unsigned long flags; + + spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); + num_buf_rdy = m2m_ctx->out_q_ctx.num_rdy; + spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); + + return num_buf_rdy; }
/** @@ -605,7 +612,14 @@ unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx) static inline unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx) { - return m2m_ctx->cap_q_ctx.num_rdy; + unsigned int num_buf_rdy; + unsigned long flags; + + spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags); + num_buf_rdy = m2m_ctx->cap_q_ctx.num_rdy; + spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags); + + return num_buf_rdy; }
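The two m2m accessors above use the same recipe as the gfs2_show_options change earlier in this patch: take the lock that guards the shared field, copy it into a local, drop the lock, and return the snapshot, so the read is consistent with concurrent queue updates. Generic shape of the pattern, with made-up names:

    static unsigned int snapshot_count(struct some_queue *q)
    {
            unsigned long flags;
            unsigned int count;

            spin_lock_irqsave(&q->lock, flags);
            count = q->num_ready;           /* stable while the lock is held */
            spin_unlock_irqrestore(&q->lock, flags);

            return count;
    }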
/** diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h index 9eef19972845..024ad8ddb27e 100644 --- a/include/net/mana/mana.h +++ b/include/net/mana/mana.h @@ -579,7 +579,7 @@ struct mana_fence_rq_resp { }; /* HW DATA */
/* Configure vPort Rx Steering */ -struct mana_cfg_rx_steer_req { +struct mana_cfg_rx_steer_req_v2 { struct gdma_req_hdr hdr; mana_handle_t vport; u16 num_indir_entries; @@ -592,6 +592,8 @@ struct mana_cfg_rx_steer_req { u8 reserved; mana_handle_t default_rxobj; u8 hashkey[MANA_HASH_KEY_SIZE]; + u8 cqe_coalescing_enable; + u8 reserved2[7]; }; /* HW DATA */
struct mana_cfg_rx_steer_resp { diff --git a/include/net/sock.h b/include/net/sock.h index ad468fe71413..415f3840a26a 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1421,6 +1421,12 @@ static inline bool sk_has_memory_pressure(const struct sock *sk) return sk->sk_prot->memory_pressure != NULL; }
+static inline bool sk_under_global_memory_pressure(const struct sock *sk) +{ + return sk->sk_prot->memory_pressure && + !!*sk->sk_prot->memory_pressure; +} + static inline bool sk_under_memory_pressure(const struct sock *sk) { if (!sk->sk_prot->memory_pressure) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 151ca95dd08d..363c7d510554 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1984,6 +1984,7 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x) if (dev->xfrmdev_ops->xdo_dev_state_free) dev->xfrmdev_ops->xdo_dev_state_free(x); xso->dev = NULL; + xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED; netdev_put(dev, &xso->dev_tracker); } } diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c index b4526668072e..27596f3b4aef 100644 --- a/kernel/dma/remap.c +++ b/kernel/dma/remap.c @@ -43,13 +43,13 @@ void *dma_common_contiguous_remap(struct page *page, size_t size, void *vaddr; int i;
- pages = kmalloc_array(count, sizeof(struct page *), GFP_KERNEL); + pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL); if (!pages) return NULL; for (i = 0; i < count; i++) pages[i] = nth_page(page, i); vaddr = vmap(pages, count, VM_DMA_COHERENT, prot); - kfree(pages); + kvfree(pages);
return vaddr; } diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 99634b29a8b8..46b4a3c7c3bf 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -538,6 +538,7 @@ struct trace_buffer { unsigned flags; int cpus; atomic_t record_disabled; + atomic_t resizing; cpumask_var_t cpumask;
struct lock_class_key *reader_lock_key; @@ -2166,7 +2167,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
/* prevent another thread from changing buffer sizes */ mutex_lock(&buffer->mutex); - + atomic_inc(&buffer->resizing);
if (cpu_id == RING_BUFFER_ALL_CPUS) { /* @@ -2321,6 +2322,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, atomic_dec(&buffer->record_disabled); }
+ atomic_dec(&buffer->resizing); mutex_unlock(&buffer->mutex); return 0;
@@ -2341,6 +2343,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, } } out_err_unlock: + atomic_dec(&buffer->resizing); mutex_unlock(&buffer->mutex); return err; } @@ -5543,6 +5546,15 @@ int ring_buffer_swap_cpu(struct trace_buffer *buffer_a, if (local_read(&cpu_buffer_b->committing)) goto out_dec;
+ /* + * When resize is in progress, we cannot swap it because + * it will mess the state of the cpu buffer. + */ + if (atomic_read(&buffer_a->resizing)) + goto out_dec; + if (atomic_read(&buffer_b->resizing)) + goto out_dec; + buffer_a->buffers[cpu] = cpu_buffer_b; buffer_b->buffers[cpu] = cpu_buffer_a;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index c80ff6f5b2cc..fd051f85efd4 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1928,9 +1928,10 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) * place on this CPU. We fail to record, but we reset * the max trace buffer (no one writes directly to it) * and flag that it failed. + * Another reason is resize is in progress. */ trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_, - "Failed to swap buffers due to commit in progress\n"); + "Failed to swap buffers due to commit or resize in progress\n"); }
WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index c5e8798e297c..17ca13e8c044 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -6374,9 +6374,14 @@ static inline int l2cap_le_command_rej(struct l2cap_conn *conn, if (!chan) goto done;
+ chan = l2cap_chan_hold_unless_zero(chan); + if (!chan) + goto done; + l2cap_chan_lock(chan); l2cap_chan_del(chan, ECONNREFUSED); l2cap_chan_unlock(chan); + l2cap_chan_put(chan);
done: mutex_unlock(&conn->chan_lock); diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 1e07d0f28972..d4498037fadc 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -7285,7 +7285,7 @@ static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
bt_dev_dbg(hdev, "err %d", err);
- memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr)); + memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
status = mgmt_status(err); if (status == MGMT_STATUS_SUCCESS) { diff --git a/net/core/sock.c b/net/core/sock.c index 1f31a97100d4..8451a95266bf 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -3107,7 +3107,7 @@ void __sk_mem_reduce_allocated(struct sock *sk, int amount) if (mem_cgroup_sockets_enabled && sk->sk_memcg) mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
- if (sk_under_memory_pressure(sk) && + if (sk_under_global_memory_pressure(sk) && (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0))) sk_leave_memory_pressure(sk); } diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 53bfd8af6920..d1e7d0ceb7ed 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -287,12 +287,12 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
switch (skb->protocol) { case htons(ETH_P_IP): - xfrm_decode_session(skb, &fl, AF_INET); memset(IPCB(skb), 0, sizeof(*IPCB(skb))); + xfrm_decode_session(skb, &fl, AF_INET); break; case htons(ETH_P_IPV6): - xfrm_decode_session(skb, &fl, AF_INET6); memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); + xfrm_decode_session(skb, &fl, AF_INET6); break; default: goto tx_err; diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 39eb947fe392..366c3c25ebe2 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -586,7 +586,9 @@ void tcp_retransmit_timer(struct sock *sk) tcp_stream_is_thin(tp) && icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) { icsk->icsk_backoff = 0; - icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX); + icsk->icsk_rto = clamp(__tcp_set_rto(tp), + tcp_rto_min(sk), + TCP_RTO_MAX); } else { /* Use normal (exponential) backoff */ icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 10b222865d46..73c85d4e0e9c 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -568,12 +568,12 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) vti6_addr_conflict(t, ipv6_hdr(skb))) goto tx_err;
- xfrm_decode_session(skb, &fl, AF_INET6); memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); + xfrm_decode_session(skb, &fl, AF_INET6); break; case htons(ETH_P_IP): - xfrm_decode_session(skb, &fl, AF_INET); memset(IPCB(skb), 0, sizeof(*IPCB(skb))); + xfrm_decode_session(skb, &fl, AF_INET); break; default: goto tx_err; diff --git a/net/key/af_key.c b/net/key/af_key.c index 31ab12fd720a..203131ad0dfe 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -1848,9 +1848,9 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms if (ext_hdrs[SADB_X_EXT_FILTER - 1]) { struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1];
- if ((xfilter->sadb_x_filter_splen >= + if ((xfilter->sadb_x_filter_splen > (sizeof(xfrm_address_t) << 3)) || - (xfilter->sadb_x_filter_dplen >= + (xfilter->sadb_x_filter_dplen > (sizeof(xfrm_address_t) << 3))) { mutex_unlock(&pfk->dump_lock); return -EINVAL; diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 62606fb44d02..4bb0d90eca1c 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1876,6 +1876,7 @@ static int proc_do_sync_threshold(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { + struct netns_ipvs *ipvs = table->extra2; int *valp = table->data; int val[2]; int rc; @@ -1885,6 +1886,7 @@ proc_do_sync_threshold(struct ctl_table *table, int write, .mode = table->mode, };
+ mutex_lock(&ipvs->sync_mutex); memcpy(val, valp, sizeof(val)); rc = proc_dointvec(&tmp, write, buffer, lenp, ppos); if (write) { @@ -1894,6 +1896,7 @@ proc_do_sync_threshold(struct ctl_table *table, int write, else memcpy(valp, val, sizeof(val)); } + mutex_unlock(&ipvs->sync_mutex); return rc; }
@@ -4321,6 +4324,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs) ipvs->sysctl_sync_threshold[0] = DEFAULT_SYNC_THRESHOLD; ipvs->sysctl_sync_threshold[1] = DEFAULT_SYNC_PERIOD; tbl[idx].data = &ipvs->sysctl_sync_threshold; + tbl[idx].extra2 = ipvs; tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold); ipvs->sysctl_sync_refresh_period = DEFAULT_SYNC_REFRESH_PERIOD; tbl[idx++].data = &ipvs->sysctl_sync_refresh_period; diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 91eacc9b0b98..b6bcc8f2f46b 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -49,8 +49,8 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = { [SCTP_CONNTRACK_COOKIE_WAIT] = 3 SECS, [SCTP_CONNTRACK_COOKIE_ECHOED] = 3 SECS, [SCTP_CONNTRACK_ESTABLISHED] = 210 SECS, - [SCTP_CONNTRACK_SHUTDOWN_SENT] = 300 SECS / 1000, - [SCTP_CONNTRACK_SHUTDOWN_RECD] = 300 SECS / 1000, + [SCTP_CONNTRACK_SHUTDOWN_SENT] = 3 SECS, + [SCTP_CONNTRACK_SHUTDOWN_RECD] = 3 SECS, [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = 3 SECS, [SCTP_CONNTRACK_HEARTBEAT_SENT] = 30 SECS, }; @@ -105,7 +105,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = { { /* ORIGINAL */ /* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */ -/* init */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW}, +/* init */ {sCL, sCL, sCW, sCE, sES, sCL, sCL, sSA, sCW}, /* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL}, /* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL}, /* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL}, diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index c6de10f458fa..b280b151a9e9 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -7088,6 +7088,7 @@ static int nft_set_catchall_flush(const struct nft_ctx *ctx, ret = __nft_set_catchall_flush(ctx, set, &elem); if (ret < 0) break; + nft_set_elem_change_active(ctx->net, set, ext); }
return ret; @@ -9494,9 +9495,14 @@ struct nft_trans_gc *nft_trans_gc_alloc(struct nft_set *set, if (!trans) return NULL;
+ trans->net = maybe_get_net(net); + if (!trans->net) { + kfree(trans); + return NULL; + } + refcount_inc(&set->refs); trans->set = set; - trans->net = get_net(net); trans->seq = gc_seq;
return trans; @@ -9752,6 +9758,22 @@ static void nft_set_commit_update(struct list_head *set_update_list) } }
+static unsigned int nft_gc_seq_begin(struct nftables_pernet *nft_net) +{ + unsigned int gc_seq; + + /* Bump gc counter, it becomes odd, this is the busy mark. */ + gc_seq = READ_ONCE(nft_net->gc_seq); + WRITE_ONCE(nft_net->gc_seq, ++gc_seq); + + return gc_seq; +} + +static void nft_gc_seq_end(struct nftables_pernet *nft_net, unsigned int gc_seq) +{ + WRITE_ONCE(nft_net->gc_seq, ++gc_seq); +} + static int nf_tables_commit(struct net *net, struct sk_buff *skb) { struct nftables_pernet *nft_net = nft_pernet(net); @@ -9837,9 +9859,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
WRITE_ONCE(nft_net->base_seq, base_seq);
- /* Bump gc counter, it becomes odd, this is the busy mark. */ - gc_seq = READ_ONCE(nft_net->gc_seq); - WRITE_ONCE(nft_net->gc_seq, ++gc_seq); + gc_seq = nft_gc_seq_begin(nft_net);
/* step 3. Start new generation, rules_gen_X now in use. */ net->nft.gencursor = nft_gencursor_next(net); @@ -10049,7 +10069,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN); nf_tables_commit_audit_log(&adl, nft_net->base_seq);
- WRITE_ONCE(nft_net->gc_seq, ++gc_seq); + nft_gc_seq_end(nft_net, gc_seq); nf_tables_commit_release(net);
return 0; @@ -11050,6 +11070,7 @@ static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event, struct net *net = n->net; unsigned int deleted; bool restart = false; + unsigned int gc_seq;
if (event != NETLINK_URELEASE || n->protocol != NETLINK_NETFILTER) return NOTIFY_DONE; @@ -11057,6 +11078,9 @@ static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event, nft_net = nft_pernet(net); deleted = 0; mutex_lock(&nft_net->commit_mutex); + + gc_seq = nft_gc_seq_begin(nft_net); + if (!list_empty(&nf_tables_destroy_list)) rcu_barrier(); again: @@ -11079,6 +11103,8 @@ static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event, if (restart) goto again; } + nft_gc_seq_end(nft_net, gc_seq); + mutex_unlock(&nft_net->commit_mutex);
return NOTIFY_DONE; @@ -11116,12 +11142,20 @@ static void __net_exit nf_tables_pre_exit_net(struct net *net) static void __net_exit nf_tables_exit_net(struct net *net) { struct nftables_pernet *nft_net = nft_pernet(net); + unsigned int gc_seq;
mutex_lock(&nft_net->commit_mutex); + + gc_seq = nft_gc_seq_begin(nft_net); + if (!list_empty(&nft_net->commit_list) || !list_empty(&nft_net->module_list)) __nf_tables_abort(net, NFNL_ABORT_NONE); + __nft_release_tables(net); + + nft_gc_seq_end(nft_net, gc_seq); + mutex_unlock(&nft_net->commit_mutex); WARN_ON_ONCE(!list_empty(&nft_net->tables)); WARN_ON_ONCE(!list_empty(&nft_net->module_list)); diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index bd19c7aec92e..c98a273c3006 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c @@ -191,6 +191,9 @@ static int nft_dynset_init(const struct nft_ctx *ctx, if (IS_ERR(set)) return PTR_ERR(set);
+ if (set->flags & NFT_SET_OBJECT) + return -EOPNOTSUPP; + if (set->ops->update == NULL) return -EOPNOTSUPP;
diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c index 92b108e3000e..352180b123fc 100644 --- a/net/netfilter/nft_set_pipapo.c +++ b/net/netfilter/nft_set_pipapo.c @@ -566,6 +566,8 @@ static struct nft_pipapo_elem *pipapo_get(const struct net *net, goto out;
if (last) { + if (nft_set_elem_expired(&f->mt[b].e->ext)) + goto next_match; if ((genmask && !nft_set_elem_active(&f->mt[b].e->ext, genmask))) goto next_match; @@ -600,17 +602,8 @@ static struct nft_pipapo_elem *pipapo_get(const struct net *net, static void *nft_pipapo_get(const struct net *net, const struct nft_set *set, const struct nft_set_elem *elem, unsigned int flags) { - struct nft_pipapo_elem *ret; - - ret = pipapo_get(net, set, (const u8 *)elem->key.val.data, + return pipapo_get(net, set, (const u8 *)elem->key.val.data, nft_genmask_cur(net)); - if (IS_ERR(ret)) - return ret; - - if (nft_set_elem_expired(&ret->ext)) - return ERR_PTR(-ENOENT); - - return ret; }
/** @@ -1698,6 +1691,17 @@ static void nft_pipapo_commit(const struct nft_set *set) priv->clone = new_clone; }
+static bool nft_pipapo_transaction_mutex_held(const struct nft_set *set) +{ +#ifdef CONFIG_PROVE_LOCKING + const struct net *net = read_pnet(&set->net); + + return lockdep_is_held(&nft_pernet(net)->commit_mutex); +#else + return true; +#endif +} + static void nft_pipapo_abort(const struct nft_set *set) { struct nft_pipapo *priv = nft_set_priv(set); @@ -1706,7 +1710,7 @@ static void nft_pipapo_abort(const struct nft_set *set) if (!priv->dirty) return;
- m = rcu_dereference(priv->match); + m = rcu_dereference_protected(priv->match, nft_pipapo_transaction_mutex_held(set));
new_clone = pipapo_clone(m); if (IS_ERR(new_clone)) @@ -1733,11 +1737,7 @@ static void nft_pipapo_activate(const struct net *net, const struct nft_set *set, const struct nft_set_elem *elem) { - struct nft_pipapo_elem *e; - - e = pipapo_get(net, set, (const u8 *)elem->key.val.data, 0); - if (IS_ERR(e)) - return; + struct nft_pipapo_elem *e = elem->priv;
nft_set_elem_change_active(net, set, &e->ext); } @@ -1951,10 +1951,6 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
data = (const u8 *)nft_set_ext_key(&e->ext);
- e = pipapo_get(net, set, data, 0); - if (IS_ERR(e)) - return; - while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) { union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS]; const u8 *match_start, *match_end; diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index a6d2a0b1aa21..3d7a91e64c88 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -1829,7 +1829,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) parms.port_no = OVSP_LOCAL; parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID]; parms.desired_ifindex = a[OVS_DP_ATTR_IFINDEX] - ? nla_get_u32(a[OVS_DP_ATTR_IFINDEX]) : 0; + ? nla_get_s32(a[OVS_DP_ATTR_IFINDEX]) : 0;
/* So far only local changes have been made, now need the lock. */ ovs_lock(); @@ -2049,7 +2049,7 @@ static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = { [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 }, [OVS_DP_ATTR_MASKS_CACHE_SIZE] = NLA_POLICY_RANGE(NLA_U32, 0, PCPU_MIN_UNIT_SIZE / sizeof(struct mask_cache_entry)), - [OVS_DP_ATTR_IFINDEX] = {.type = NLA_U32 }, + [OVS_DP_ATTR_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 0), };
static const struct genl_small_ops dp_datapath_genl_ops[] = { @@ -2302,7 +2302,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info) parms.port_no = port_no; parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID]; parms.desired_ifindex = a[OVS_VPORT_ATTR_IFINDEX] - ? nla_get_u32(a[OVS_VPORT_ATTR_IFINDEX]) : 0; + ? nla_get_s32(a[OVS_VPORT_ATTR_IFINDEX]) : 0;
vport = new_vport(&parms); err = PTR_ERR(vport); @@ -2539,7 +2539,7 @@ static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = { [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 }, [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_UNSPEC }, [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED }, - [OVS_VPORT_ATTR_IFINDEX] = { .type = NLA_U32 }, + [OVS_VPORT_ATTR_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 0), [OVS_VPORT_ATTR_NETNSID] = { .type = NLA_S32 }, [OVS_VPORT_ATTR_UPCALL_STATS] = { .type = NLA_NESTED }, }; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 10615878e396..714bd87f12d9 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -2291,6 +2291,7 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
if (false) { alloc_skb: + spin_unlock(&other->sk_receive_queue.lock); unix_state_unlock(other); mutex_unlock(&unix_sk(other)->iolock); newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT, @@ -2330,6 +2331,7 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page, init_scm = false; }
+ spin_lock(&other->sk_receive_queue.lock); skb = skb_peek_tail(&other->sk_receive_queue); if (tail && tail == skb) { skb = newskb; @@ -2360,14 +2362,11 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page, refcount_add(size, &sk->sk_wmem_alloc);
if (newskb) { - err = unix_scm_to_skb(&scm, skb, false); - if (err) - goto err_state_unlock; - spin_lock(&other->sk_receive_queue.lock); + unix_scm_to_skb(&scm, skb, false); __skb_queue_tail(&other->sk_receive_queue, newskb); - spin_unlock(&other->sk_receive_queue.lock); }
+ spin_unlock(&other->sk_receive_queue.lock); unix_state_unlock(other); mutex_unlock(&unix_sk(other)->iolock);
diff --git a/net/xfrm/xfrm_compat.c b/net/xfrm/xfrm_compat.c index 8cbf45a8bcdc..655fe4ff8621 100644 --- a/net/xfrm/xfrm_compat.c +++ b/net/xfrm/xfrm_compat.c @@ -108,7 +108,7 @@ static const struct nla_policy compat_policy[XFRMA_MAX+1] = { [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) }, [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) }, [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) }, - [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) }, + [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_user_sec_ctx) }, [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) }, [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) }, [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 }, diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 815b38080401..d5ee96789d4b 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -180,6 +180,8 @@ static int xfrm4_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb) int optlen = 0; int err = -EINVAL;
+ skb->protocol = htons(ETH_P_IP); + if (unlikely(XFRM_MODE_SKB_CB(skb)->protocol == IPPROTO_BEETPH)) { struct ip_beet_phdr *ph; int phlen; @@ -232,6 +234,8 @@ static int xfrm4_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb) { int err = -EINVAL;
+ skb->protocol = htons(ETH_P_IP); + if (!pskb_may_pull(skb, sizeof(struct iphdr))) goto out;
@@ -267,6 +271,8 @@ static int xfrm6_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb) { int err = -EINVAL;
+ skb->protocol = htons(ETH_P_IPV6); + if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) goto out;
@@ -296,6 +302,8 @@ static int xfrm6_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb) int size = sizeof(struct ipv6hdr); int err;
+ skb->protocol = htons(ETH_P_IPV6); + err = skb_cow_head(skb, size + skb->mac_len); if (err) goto out; @@ -346,6 +354,7 @@ xfrm_inner_mode_encap_remove(struct xfrm_state *x, return xfrm6_remove_tunnel_encap(x, skb); break; } + return -EINVAL; }
WARN_ON_ONCE(1); @@ -366,19 +375,6 @@ static int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb) return -EAFNOSUPPORT; }
- switch (XFRM_MODE_SKB_CB(skb)->protocol) { - case IPPROTO_IPIP: - case IPPROTO_BEETPH: - skb->protocol = htons(ETH_P_IP); - break; - case IPPROTO_IPV6: - skb->protocol = htons(ETH_P_IPV6); - break; - default: - WARN_ON_ONCE(1); - break; - } - return xfrm_inner_mode_encap_remove(x, skb); }
diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c index a3319965470a..b86474084690 100644 --- a/net/xfrm/xfrm_interface_core.c +++ b/net/xfrm/xfrm_interface_core.c @@ -537,8 +537,8 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
switch (skb->protocol) { case htons(ETH_P_IPV6): - xfrm_decode_session(skb, &fl, AF_INET6); memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); + xfrm_decode_session(skb, &fl, AF_INET6); if (!dst) { fl.u.ip6.flowi6_oif = dev->ifindex; fl.u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC; @@ -552,8 +552,8 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev) } break; case htons(ETH_P_IP): - xfrm_decode_session(skb, &fl, AF_INET); memset(IPCB(skb), 0, sizeof(*IPCB(skb))); + xfrm_decode_session(skb, &fl, AF_INET); if (!dst) { struct rtable *rt;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 49e63eea841d..bda5327bf34d 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1324,12 +1324,8 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, struct xfrm_dev_offload *xso = &x->xso;
if (xso->type == XFRM_DEV_OFFLOAD_PACKET) { - xso->dev->xfrmdev_ops->xdo_dev_state_delete(x); - xso->dir = 0; - netdev_put(xso->dev, &xso->dev_tracker); - xso->dev = NULL; - xso->real_dev = NULL; - xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED; + xfrm_dev_state_delete(x); + xfrm_dev_state_free(x); } #endif x->km.state = XFRM_STATE_DEAD; diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index c34a2a06ca94..ad01997c3aa9 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -628,7 +628,7 @@ static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs, struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH]; struct nlattr *mt = attrs[XFRMA_MTIMER_THRESH];
- if (re) { + if (re && x->replay_esn && x->preplay_esn) { struct xfrm_replay_state_esn *replay_esn; replay_esn = nla_data(re); memcpy(x->replay_esn, replay_esn, @@ -1267,6 +1267,15 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb) sizeof(*filter), GFP_KERNEL); if (filter == NULL) return -ENOMEM; + + /* see addr_match(), (prefix length >> 5) << 2 + * will be used to compare xfrm_address_t + */ + if (filter->splen > (sizeof(xfrm_address_t) << 3) || + filter->dplen > (sizeof(xfrm_address_t) << 3)) { + kfree(filter); + return -EINVAL; + } }
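A quick sanity check on the filter bound above (the same limit af_key now applies to SADB_X_EXT_FILTER): xfrm_address_t is 16 bytes, so a prefix length can be at most sizeof(xfrm_address_t) << 3 = 128 bits. addr_match() turns the prefix into a byte offset with (prefix >> 5) << 2, and 128 gives (128 >> 5) << 2 = 16, exactly the size of the address; anything larger would read past the end of the xfrm_address_t, which is why the check rejects it (and why 128 itself stays legal, hence '>' rather than '>=').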
if (attrs[XFRMA_PROTO]) @@ -2336,6 +2345,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, NETLINK_CB(skb).portid); } } else { + xfrm_dev_policy_delete(xp); xfrm_audit_policy_delete(xp, err ? 0 : 1, true);
if (err != 0) @@ -3015,7 +3025,7 @@ const struct nla_policy xfrma_policy[XFRMA_MAX+1] = { [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) }, [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) }, [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) }, - [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) }, + [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_user_sec_ctx) }, [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) }, [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) }, [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 }, @@ -3035,6 +3045,7 @@ const struct nla_policy xfrma_policy[XFRMA_MAX+1] = { [XFRMA_SET_MARK] = { .type = NLA_U32 }, [XFRMA_SET_MARK_MASK] = { .type = NLA_U32 }, [XFRMA_IF_ID] = { .type = NLA_U32 }, + [XFRMA_MTIMER_THRESH] = { .type = NLA_U32 }, }; EXPORT_SYMBOL_GPL(xfrma_policy);
diff --git a/rust/macros/vtable.rs b/rust/macros/vtable.rs index 34d5e7fb5768..ee06044fcd4f 100644 --- a/rust/macros/vtable.rs +++ b/rust/macros/vtable.rs @@ -74,6 +74,7 @@ pub(crate) fn vtable(_attr: TokenStream, ts: TokenStream) -> TokenStream { const {gen_const_name}: bool = false;", ) .unwrap(); + consts.insert(gen_const_name); } } else { const_items = "const USE_VTABLE_ATTR: () = ();".to_owned(); diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c index fe3587547cfe..39610a15bcc9 100644 --- a/sound/hda/hdac_regmap.c +++ b/sound/hda/hdac_regmap.c @@ -597,10 +597,9 @@ EXPORT_SYMBOL_GPL(snd_hdac_regmap_update_raw_once); */ void snd_hdac_regmap_sync(struct hdac_device *codec) { - if (codec->regmap) { - mutex_lock(&codec->regmap_lock); + mutex_lock(&codec->regmap_lock); + if (codec->regmap) regcache_sync(codec->regmap); - mutex_unlock(&codec->regmap_lock); - } + mutex_unlock(&codec->regmap_lock); } EXPORT_SYMBOL_GPL(snd_hdac_regmap_sync); diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index bcd548e247fc..074aa06aa585 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -7081,6 +7081,9 @@ enum { ALC285_FIXUP_SPEAKER2_TO_DAC1, ALC285_FIXUP_ASUS_SPEAKER2_TO_DAC1, ALC285_FIXUP_ASUS_HEADSET_MIC, + ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS, + ALC285_FIXUP_ASUS_I2C_SPEAKER2_TO_DAC1, + ALC285_FIXUP_ASUS_I2C_HEADSET_MIC, ALC280_FIXUP_HP_HEADSET_MIC, ALC221_FIXUP_HP_FRONT_MIC, ALC292_FIXUP_TPT460, @@ -8073,6 +8076,31 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC285_FIXUP_ASUS_SPEAKER2_TO_DAC1 }, + [ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x14, 0x90170120 }, + { } + }, + .chained = true, + .chain_id = ALC285_FIXUP_ASUS_HEADSET_MIC + }, + [ALC285_FIXUP_ASUS_I2C_SPEAKER2_TO_DAC1] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_speaker2_to_dac1, + .chained = true, + .chain_id = ALC287_FIXUP_CS35L41_I2C_2 + }, + [ALC285_FIXUP_ASUS_I2C_HEADSET_MIC] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x19, 0x03a11050 }, + { 0x1b, 0x03a11c30 }, + { } + }, + .chained = true, + .chain_id = ALC285_FIXUP_ASUS_I2C_SPEAKER2_TO_DAC1 + }, [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { @@ -9578,7 +9606,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8b96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8b97, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8bf0, "HP", ALC236_FIXUP_HP_GPIO_LED), - SND_PCI_QUIRK(0x103c, 0x8c26, "HP HP EliteBook 800G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8c46, "HP EliteBook 830 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8c47, "HP EliteBook 840 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8c48, "HP EliteBook 860 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8c49, "HP Elite x360 830 2-in-1 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8c70, "HP EliteBook 835 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x103f, "ASUS 
TX300", ALC282_FIXUP_ASUS_TX300), SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), @@ -9598,10 +9632,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1313, "Asus K42JZ", ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK), + SND_PCI_QUIRK(0x1043, 0x1433, "ASUS GX650P", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC), + SND_PCI_QUIRK(0x1043, 0x1463, "Asus GA402X", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x1473, "ASUS GU604V", ALC285_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x1483, "ASUS GU603V", ALC285_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x1493, "ASUS GV601V", ALC285_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A), + SND_PCI_QUIRK(0x1043, 0x1573, "ASUS GZ301V", ALC285_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK), SND_PCI_QUIRK(0x1043, 0x1683, "ASUS UM3402YAR", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401), @@ -9627,7 +9664,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x1043, 0x1c62, "ASUS GU603", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS), - SND_PCI_QUIRK(0x1043, 0x1caf, "ASUS G634JYR/JZR", ALC285_FIXUP_ASUS_HEADSET_MIC), + SND_PCI_QUIRK(0x1043, 0x1c9f, "ASUS G614JI", ALC285_FIXUP_ASUS_HEADSET_MIC), + SND_PCI_QUIRK(0x1043, 0x1caf, "ASUS G634JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS), SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x1d1f, "ASUS ROG Strix G17 2023 (G713PV)", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401), @@ -10595,6 +10633,7 @@ static int patch_alc269(struct hda_codec *codec) spec = codec->spec; spec->gen.shared_mic_vref_pin = 0x18; codec->power_save_node = 0; + spec->en_3kpull_low = true;
#ifdef CONFIG_PM codec->patch_ops.suspend = alc269_suspend; @@ -10677,14 +10716,16 @@ static int patch_alc269(struct hda_codec *codec) spec->shutup = alc256_shutup; spec->init_hook = alc256_init; spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */ - if (codec->bus->pci->vendor == PCI_VENDOR_ID_AMD) - spec->en_3kpull_low = true; + if (codec->core.vendor_id == 0x10ec0236 && + codec->bus->pci->vendor != PCI_VENDOR_ID_AMD) + spec->en_3kpull_low = false; break; case 0x10ec0257: spec->codec_variant = ALC269_TYPE_ALC257; spec->shutup = alc256_shutup; spec->init_hook = alc256_init; spec->gen.mixer_nid = 0; + spec->en_3kpull_low = false; break; case 0x10ec0215: case 0x10ec0245: @@ -11316,6 +11357,7 @@ enum { ALC897_FIXUP_HP_HSMIC_VERB, ALC897_FIXUP_LENOVO_HEADSET_MODE, ALC897_FIXUP_HEADSET_MIC_PIN2, + ALC897_FIXUP_UNIS_H3C_X500S, };
static const struct hda_fixup alc662_fixups[] = { @@ -11755,6 +11797,13 @@ static const struct hda_fixup alc662_fixups[] = { .chained = true, .chain_id = ALC897_FIXUP_LENOVO_HEADSET_MODE }, + [ALC897_FIXUP_UNIS_H3C_X500S] = { + .type = HDA_FIXUP_VERBS, + .v.verbs = (const struct hda_verb[]) { + { 0x14, AC_VERB_SET_EAPD_BTLENABLE, 0 }, + {} + }, + }, };
static const struct snd_pci_quirk alc662_fixup_tbl[] = { @@ -11916,6 +11965,7 @@ static const struct hda_model_fixup alc662_fixup_models[] = { {.id = ALC662_FIXUP_USI_HEADSET_MODE, .name = "usi-headset"}, {.id = ALC662_FIXUP_LENOVO_MULTI_CODECS, .name = "dual-codecs"}, {.id = ALC669_FIXUP_ACER_ASPIRE_ETHOS, .name = "aspire-ethos"}, + {.id = ALC897_FIXUP_UNIS_H3C_X500S, .name = "unis-h3c-x500s"}, {} };
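The new ALC897_FIXUP_UNIS_H3C_X500S entry is also exported as a model string, so on boards that are not matched by PCI subsystem ID the fixup can still be selected by hand with the usual HD-audio mechanism, e.g. a modprobe option along the lines of 'options snd-hda-intel model=unis-h3c-x500s'. The fixup itself just writes 0 to the EAPD/BTL enable control of node 0x14.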
diff --git a/sound/soc/amd/Kconfig b/sound/soc/amd/Kconfig index 08e42082f5e9..e724cb3c70b7 100644 --- a/sound/soc/amd/Kconfig +++ b/sound/soc/amd/Kconfig @@ -81,6 +81,7 @@ config SND_SOC_AMD_VANGOGH_MACH tristate "AMD Vangogh support for NAU8821 CS35L41" select SND_SOC_NAU8821 select SND_SOC_CS35L41_SPI + select SND_AMD_ACP_CONFIG depends on SND_SOC_AMD_ACP5x && I2C && SPI_MASTER help This option enables machine driver for Vangogh platform diff --git a/sound/soc/amd/vangogh/acp5x.h b/sound/soc/amd/vangogh/acp5x.h index bd9f1c5684d1..ac1936a8c43f 100644 --- a/sound/soc/amd/vangogh/acp5x.h +++ b/sound/soc/amd/vangogh/acp5x.h @@ -147,6 +147,8 @@ static inline void acp_writel(u32 val, void __iomem *base_addr) writel(val, base_addr - ACP5x_PHY_BASE_ADDRESS); }
+int snd_amd_acp_find_config(struct pci_dev *pci); + static inline u64 acp_get_byte_count(struct i2s_stream_instance *rtd, int direction) { diff --git a/sound/soc/amd/vangogh/pci-acp5x.c b/sound/soc/amd/vangogh/pci-acp5x.c index e0df17c88e8e..c4634a8a17cd 100644 --- a/sound/soc/amd/vangogh/pci-acp5x.c +++ b/sound/soc/amd/vangogh/pci-acp5x.c @@ -125,10 +125,15 @@ static int snd_acp5x_probe(struct pci_dev *pci, { struct acp5x_dev_data *adata; struct platform_device_info pdevinfo[ACP5x_DEVS]; - unsigned int irqflags; + unsigned int irqflags, flag; int ret, i; u32 addr, val;
+ /* Return if acp config flag is defined */ + flag = snd_amd_acp_find_config(pci); + if (flag) + return -ENODEV; + irqflags = IRQF_SHARED; if (pci->revision != 0x50) return -ENODEV; diff --git a/sound/soc/codecs/cs35l56.c b/sound/soc/codecs/cs35l56.c index e0d2b9bb2326..f3fee448d759 100644 --- a/sound/soc/codecs/cs35l56.c +++ b/sound/soc/codecs/cs35l56.c @@ -834,12 +834,6 @@ static void cs35l56_dsp_work(struct work_struct *work) if (!cs35l56->init_done) return;
- cs35l56->dsp.part = devm_kasprintf(cs35l56->dev, GFP_KERNEL, "cs35l56%s-%02x", - cs35l56->secured ? "s" : "", cs35l56->rev); - - if (!cs35l56->dsp.part) - return; - pm_runtime_get_sync(cs35l56->dev);
/* @@ -1505,6 +1499,12 @@ int cs35l56_init(struct cs35l56_private *cs35l56) dev_info(cs35l56->dev, "Cirrus Logic CS35L56%s Rev %02X OTP%d\n", cs35l56->secured ? "s" : "", cs35l56->rev, otpid);
+ /* Populate the DSP information with the revision and security state */ + cs35l56->dsp.part = devm_kasprintf(cs35l56->dev, GFP_KERNEL, "cs35l56%s-%02x", + cs35l56->secured ? "s" : "", cs35l56->rev); + if (!cs35l56->dsp.part) + return -ENOMEM; + /* Wake source and *_BLOCKED interrupts default to unmasked, so mask them */ regmap_write(cs35l56->regmap, CS35L56_IRQ1_MASK_20, 0xffffffff); regmap_update_bits(cs35l56->regmap, CS35L56_IRQ1_MASK_1, diff --git a/sound/soc/codecs/max98363.c b/sound/soc/codecs/max98363.c index e6b84e222b50..169913ba76dd 100644 --- a/sound/soc/codecs/max98363.c +++ b/sound/soc/codecs/max98363.c @@ -191,10 +191,10 @@ static int max98363_io_init(struct sdw_slave *slave) pm_runtime_get_noresume(dev);
 	ret = regmap_read(max98363->regmap, MAX98363_R21FF_REV_ID, &reg);
-	if (!ret) {
+	if (!ret)
 		dev_info(dev, "Revision ID: %X\n", reg);
-		return ret;
-	}
+	else
+		goto out;

 	if (max98363->first_hw_init) {
 		regcache_cache_bypass(max98363->regmap, false);
@@ -204,10 +204,11 @@ static int max98363_io_init(struct sdw_slave *slave)
 	max98363->first_hw_init = true;
 	max98363->hw_init = true;

+out:
 	pm_runtime_mark_last_busy(dev);
 	pm_runtime_put_autosuspend(dev);

-	return 0;
+	return ret;
 }
 #define MAX98363_RATES SNDRV_PCM_RATE_8000_192000
diff --git a/sound/soc/codecs/rt5665.c b/sound/soc/codecs/rt5665.c
index 17afaef85c77..382bdbcf7b59 100644
--- a/sound/soc/codecs/rt5665.c
+++ b/sound/soc/codecs/rt5665.c
@@ -4472,6 +4472,8 @@ static void rt5665_remove(struct snd_soc_component *component)
 	struct rt5665_priv *rt5665 = snd_soc_component_get_drvdata(component);

 	regmap_write(rt5665->regmap, RT5665_RESET, 0);
+
+	regulator_bulk_disable(ARRAY_SIZE(rt5665->supplies), rt5665->supplies);
 }
#ifdef CONFIG_PM diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c index 5fa204897a52..a6d13aae8f72 100644 --- a/sound/soc/intel/boards/sof_sdw.c +++ b/sound/soc/intel/boards/sof_sdw.c @@ -367,6 +367,16 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = { RT711_JD2), }, /* RaptorLake devices */ + { + .callback = sof_sdw_quirk_cb, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"), + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0BDA") + }, + .driver_data = (void *)(SOF_SDW_TGL_HDMI | + RT711_JD2 | + SOF_SDW_FOUR_SPK), + }, { .callback = sof_sdw_quirk_cb, .matches = { @@ -415,6 +425,31 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = { }, .driver_data = (void *)(RT711_JD1), }, + { + .callback = sof_sdw_quirk_cb, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "Meteor Lake Client Platform"), + }, + .driver_data = (void *)(RT711_JD2_100K), + }, + { + .callback = sof_sdw_quirk_cb, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Google"), + DMI_MATCH(DMI_PRODUCT_NAME, "Rex"), + }, + .driver_data = (void *)(SOF_SDW_PCH_DMIC), + }, + /* LunarLake devices */ + { + .callback = sof_sdw_quirk_cb, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "Lunar Lake Client Platform"), + }, + .driver_data = (void *)(RT711_JD2_100K), + }, {} };
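These DMI entries only pick the default quirk flags for the matched boards. For testing on hardware that is not listed, the board driver also carries a quirk override module parameter; assuming the mainline sof_sdw parameter and module naming (not taken from this patch), something like snd_soc_sof_sdw.quirk=<flags> on the kernel command line forces a flag combination without a DMI match.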
diff --git a/sound/soc/intel/boards/sof_sdw_rt711_sdca.c b/sound/soc/intel/boards/sof_sdw_rt711_sdca.c
index 7f16304d025b..cf8b9793fe0e 100644
--- a/sound/soc/intel/boards/sof_sdw_rt711_sdca.c
+++ b/sound/soc/intel/boards/sof_sdw_rt711_sdca.c
@@ -143,6 +143,9 @@ int sof_sdw_rt711_sdca_exit(struct snd_soc_card *card, struct snd_soc_dai_link *
 	if (!ctx->headset_codec_dev)
 		return 0;

+	if (!SOF_RT711_JDSRC(sof_sdw_quirk))
+		return 0;
+
 	device_remove_software_node(ctx->headset_codec_dev);
 	put_device(ctx->headset_codec_dev);
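The axg-tdm-formatter change below stops programming each lane's slot mask independently and instead walks the slots two at a time across all lanes, so channel placement stays consistent regardless of how many channels the stream carries. A minimal user-space sketch of that pairwise distribution, with made-up lane count, lane masks and channel count (none of these values come from the patch):

/* Illustrative model of the pairwise slot distribution; values are examples only. */
#include <stdio.h>
#include <stdint.h>

#define NUM_LANES 4	/* stand-in for AXG_TDM_NUM_LANES */

int main(void)
{
	unsigned int ch = 4;				/* channels in the stream */
	uint32_t mask[NUM_LANES] = { 0xf, 0xf, 0, 0 };	/* slots offered per lane */
	uint32_t val[NUM_LANES] = { 0 };		/* computed per-lane masks */
	int i, j, k;

	/* Walk slot pairs first, then lanes, then the two slots of the pair */
	for (i = 0; i < 32 && ch; i += 2)
		for (j = 0; j < NUM_LANES; j++)
			for (k = 0; k < 2; k++)
				if ((mask[j] & (1u << (i + k))) && ch) {
					val[j] |= 1u << (i + k);
					ch--;
				}

	for (j = 0; j < NUM_LANES; j++)
		printf("lane %d: 0x%08x\n", j, val[j]);

	return 0;
}

With these masks the old per-lane loop would have packed all four channels onto lane 0 (0xf, 0x0); the pairwise walk instead yields 0x3 on each of the first two lanes, which is the HW-style distribution the patch comment refers to.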
diff --git a/sound/soc/meson/axg-tdm-formatter.c b/sound/soc/meson/axg-tdm-formatter.c index 9883dc777f63..63333a2b0a9c 100644 --- a/sound/soc/meson/axg-tdm-formatter.c +++ b/sound/soc/meson/axg-tdm-formatter.c @@ -30,27 +30,32 @@ int axg_tdm_formatter_set_channel_masks(struct regmap *map, struct axg_tdm_stream *ts, unsigned int offset) { - unsigned int val, ch = ts->channels; - unsigned long mask; - int i, j; + unsigned int ch = ts->channels; + u32 val[AXG_TDM_NUM_LANES]; + int i, j, k; + + /* + * We need to mimick the slot distribution used by the HW to keep the + * channel placement consistent regardless of the number of channel + * in the stream. This is why the odd algorithm below is used. + */ + memset(val, 0, sizeof(*val) * AXG_TDM_NUM_LANES);
/* * Distribute the channels of the stream over the available slots - * of each TDM lane + * of each TDM lane. We need to go over the 32 slots ... */ - for (i = 0; i < AXG_TDM_NUM_LANES; i++) { - val = 0; - mask = ts->mask[i]; - - for (j = find_first_bit(&mask, 32); - (j < 32) && ch; - j = find_next_bit(&mask, 32, j + 1)) { - val |= 1 << j; - ch -= 1; + for (i = 0; (i < 32) && ch; i += 2) { + /* ... of all the lanes ... */ + for (j = 0; j < AXG_TDM_NUM_LANES; j++) { + /* ... then distribute the channels in pairs */ + for (k = 0; k < 2; k++) { + if ((BIT(i + k) & ts->mask[j]) && ch) { + val[j] |= BIT(i + k); + ch -= 1; + } + } } - - regmap_write(map, offset, val); - offset += regmap_get_reg_stride(map); }
/* @@ -63,6 +68,11 @@ int axg_tdm_formatter_set_channel_masks(struct regmap *map, return -EINVAL; }
+ for (i = 0; i < AXG_TDM_NUM_LANES; i++) { + regmap_write(map, offset, val[i]); + offset += regmap_get_reg_stride(map); + } + return 0; } EXPORT_SYMBOL_GPL(axg_tdm_formatter_set_channel_masks); diff --git a/sound/soc/sof/amd/acp.h b/sound/soc/sof/amd/acp.h index 1c535cc6c3a9..dc624f727aa3 100644 --- a/sound/soc/sof/amd/acp.h +++ b/sound/soc/sof/amd/acp.h @@ -55,6 +55,9 @@
#define ACP_DSP_TO_HOST_IRQ 0x04
+#define ACP_RN_PCI_ID 0x01 +#define ACP_RMB_PCI_ID 0x6F + #define HOST_BRIDGE_CZN 0x1630 #define HOST_BRIDGE_RMB 0x14B5 #define ACP_SHA_STAT 0x8000 diff --git a/sound/soc/sof/amd/pci-rmb.c b/sound/soc/sof/amd/pci-rmb.c index eaf70ea6e556..58b3092425f1 100644 --- a/sound/soc/sof/amd/pci-rmb.c +++ b/sound/soc/sof/amd/pci-rmb.c @@ -65,6 +65,9 @@ static int acp_pci_rmb_probe(struct pci_dev *pci, const struct pci_device_id *pc { unsigned int flag;
+ if (pci->revision != ACP_RMB_PCI_ID) + return -ENODEV; + flag = snd_amd_acp_find_config(pci); if (flag != FLAG_AMD_SOF && flag != FLAG_AMD_SOF_ONLY_DMIC) return -ENODEV; diff --git a/sound/soc/sof/amd/pci-rn.c b/sound/soc/sof/amd/pci-rn.c index 4809cb644152..7409e21ce5aa 100644 --- a/sound/soc/sof/amd/pci-rn.c +++ b/sound/soc/sof/amd/pci-rn.c @@ -65,6 +65,9 @@ static int acp_pci_rn_probe(struct pci_dev *pci, const struct pci_device_id *pci { unsigned int flag;
+ if (pci->revision != ACP_RN_PCI_ID) + return -ENODEV; + flag = snd_amd_acp_find_config(pci); if (flag != FLAG_AMD_SOF && flag != FLAG_AMD_SOF_ONLY_DMIC) return -ENODEV; diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c index 9a9d82220fd0..30db685cc5f4 100644 --- a/sound/soc/sof/core.c +++ b/sound/soc/sof/core.c @@ -504,8 +504,10 @@ int snd_sof_device_shutdown(struct device *dev) if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE)) cancel_work_sync(&sdev->probe_work);
-	if (sdev->fw_state == SOF_FW_BOOT_COMPLETE)
+	if (sdev->fw_state == SOF_FW_BOOT_COMPLETE) {
+		sof_fw_trace_free(sdev);
 		return snd_sof_shutdown(sdev);
+	}
return 0; } diff --git a/sound/soc/sof/intel/hda-dai-ops.c b/sound/soc/sof/intel/hda-dai-ops.c index 4b39cecacd68..5938046f46b2 100644 --- a/sound/soc/sof/intel/hda-dai-ops.c +++ b/sound/soc/sof/intel/hda-dai-ops.c @@ -289,16 +289,27 @@ static const struct hda_dai_widget_dma_ops hda_ipc4_chain_dma_ops = { static int hda_ipc3_post_trigger(struct snd_sof_dev *sdev, struct snd_soc_dai *cpu_dai, struct snd_pcm_substream *substream, int cmd) { + struct hdac_ext_stream *hext_stream = hda_get_hext_stream(sdev, cpu_dai, substream); struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(cpu_dai, substream->stream); + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); + struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
switch (cmd) { case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_STOP: { struct snd_sof_dai_config_data data = { 0 }; + int ret;
data.dai_data = DMA_CHAN_INVALID; - return hda_dai_config(w, SOF_DAI_CONFIG_FLAGS_HW_FREE, &data); + ret = hda_dai_config(w, SOF_DAI_CONFIG_FLAGS_HW_FREE, &data); + if (ret < 0) + return ret; + + if (cmd == SNDRV_PCM_TRIGGER_STOP) + return hda_link_dma_cleanup(substream, hext_stream, cpu_dai, codec_dai); + + break; } case SNDRV_PCM_TRIGGER_PAUSE_PUSH: return hda_dai_config(w, SOF_DAI_CONFIG_FLAGS_PAUSE, NULL); diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c index 44a5d94c5050..8a76320c3b99 100644 --- a/sound/soc/sof/intel/hda-dai.c +++ b/sound/soc/sof/intel/hda-dai.c @@ -91,10 +91,10 @@ hda_dai_get_ops(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai return sdai->platform_private; }
-static int hda_link_dma_cleanup(struct snd_pcm_substream *substream, - struct hdac_ext_stream *hext_stream, - struct snd_soc_dai *cpu_dai, - struct snd_soc_dai *codec_dai) +int hda_link_dma_cleanup(struct snd_pcm_substream *substream, + struct hdac_ext_stream *hext_stream, + struct snd_soc_dai *cpu_dai, + struct snd_soc_dai *codec_dai) { struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(cpu_dai->component); const struct hda_dai_widget_dma_ops *ops = hda_dai_get_ops(substream, cpu_dai); diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c index 3153e21f100a..3853582e32e1 100644 --- a/sound/soc/sof/intel/hda.c +++ b/sound/soc/sof/intel/hda.c @@ -1343,12 +1343,22 @@ static void hda_generic_machine_select(struct snd_sof_dev *sdev, hda_mach->mach_params.dmic_num = dmic_num; pdata->tplg_filename = tplg_filename;
- if (codec_num == 2) { + if (codec_num == 2 || + (codec_num == 1 && !HDA_IDISP_CODEC(bus->codec_mask))) { /* * Prevent SoundWire links from starting when an external * HDaudio codec is used */ hda_mach->mach_params.link_mask = 0; + } else { + /* + * Allow SoundWire links to start when no external HDaudio codec + * was detected. This will not create a SoundWire card but + * will help detect if any SoundWire codec reports as ATTACHED. + */ + struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata; + + hda_mach->mach_params.link_mask = hdev->info.link_mask; }
*mach = hda_mach; diff --git a/sound/soc/sof/intel/hda.h b/sound/soc/sof/intel/hda.h index c4befacde23e..94c738eae751 100644 --- a/sound/soc/sof/intel/hda.h +++ b/sound/soc/sof/intel/hda.h @@ -942,5 +942,7 @@ const struct hda_dai_widget_dma_ops * hda_select_dai_widget_ops(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget); int hda_dai_config(struct snd_soc_dapm_widget *w, unsigned int flags, struct snd_sof_dai_config_data *data); +int hda_link_dma_cleanup(struct snd_pcm_substream *substream, struct hdac_ext_stream *hext_stream, + struct snd_soc_dai *cpu_dai, struct snd_soc_dai *codec_dai);
#endif diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index efb4a3311cc5..5d72dc8441cb 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -4507,6 +4507,35 @@ YAMAHA_DEVICE(0x7010, "UB99"), } } }, +{ + /* Advanced modes of the Mythware XA001AU. + * For the standard mode, Mythware XA001AU has ID ffad:a001 + */ + USB_DEVICE_VENDOR_SPEC(0xffad, 0xa001), + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { + .vendor_name = "Mythware", + .product_name = "XA001AU", + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_COMPOSITE, + .data = (const struct snd_usb_audio_quirk[]) { + { + .ifnum = 0, + .type = QUIRK_IGNORE_INTERFACE, + }, + { + .ifnum = 1, + .type = QUIRK_AUDIO_STANDARD_INTERFACE, + }, + { + .ifnum = 2, + .type = QUIRK_AUDIO_STANDARD_INTERFACE, + }, + { + .ifnum = -1 + } + } + } +},
#undef USB_DEVICE_VENDOR_SPEC #undef USB_AUDIO_DEVICE diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c index a25fdc08c39e..228b2bb086ac 100644 --- a/tools/objtool/arch/x86/decode.c +++ b/tools/objtool/arch/x86/decode.c @@ -824,8 +824,11 @@ bool arch_is_retpoline(struct symbol *sym)
 bool arch_is_rethunk(struct symbol *sym)
 {
-	return !strcmp(sym->name, "__x86_return_thunk") ||
-	       !strcmp(sym->name, "srso_untrain_ret") ||
-	       !strcmp(sym->name, "srso_safe_ret") ||
-	       !strcmp(sym->name, "__ret");
+	return !strcmp(sym->name, "__x86_return_thunk");
+}
+
+bool arch_is_embedded_insn(struct symbol *sym)
+{
+	return !strcmp(sym->name, "retbleed_return_thunk") ||
+	       !strcmp(sym->name, "srso_safe_ret");
 }
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 0fcf99c91400..f7f34a0b101e 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -429,7 +429,7 @@ static int decode_instructions(struct objtool_file *file)
 		if (!strcmp(sec->name, ".noinstr.text") ||
 		    !strcmp(sec->name, ".entry.text") ||
 		    !strcmp(sec->name, ".cpuidle.text") ||
-		    !strncmp(sec->name, ".text.__x86.", 12))
+		    !strncmp(sec->name, ".text..__x86.", 13))
 			sec->noinstr = true;
/* @@ -495,7 +495,7 @@ static int decode_instructions(struct objtool_file *file) return -1; }
-		if (func->return_thunk || func->alias != func)
+		if (func->embedded_insn || func->alias != func)
 			continue;
if (!find_insn(file, sec, func->offset)) { @@ -1346,16 +1346,33 @@ static int add_ignore_alternatives(struct objtool_file *file) return 0; }
+/*
+ * Symbols that replace INSN_CALL_DYNAMIC, every (tail) call to such a symbol
+ * will be added to the .retpoline_sites section.
+ */
 __weak bool arch_is_retpoline(struct symbol *sym)
 {
 	return false;
 }

+/*
+ * Symbols that replace INSN_RETURN, every (tail) call to such a symbol
+ * will be added to the .return_sites section.
+ */
 __weak bool arch_is_rethunk(struct symbol *sym)
 {
 	return false;
 }
+/* + * Symbols that are embedded inside other instructions, because sometimes crazy + * code exists. These are mostly ignored for validation purposes. + */ +__weak bool arch_is_embedded_insn(struct symbol *sym) +{ + return false; +} + static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn) { struct reloc *reloc; @@ -1638,14 +1655,14 @@ static int add_jump_destinations(struct objtool_file *file) struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);
/* - * This is a special case for zen_untrain_ret(). + * This is a special case for retbleed_untrain_ret(). * It jumps to __x86_return_thunk(), but objtool * can't find the thunk's starting RET * instruction, because the RET is also in the * middle of another instruction. Objtool only * knows about the outer instruction. */ - if (sym && sym->return_thunk) { + if (sym && sym->embedded_insn) { add_return_call(file, insn, false); continue; } @@ -2550,6 +2567,9 @@ static int classify_symbols(struct objtool_file *file) if (arch_is_rethunk(func)) func->return_thunk = true;
+		if (arch_is_embedded_insn(func))
+			func->embedded_insn = true;
+
 		if (arch_ftrace_match(func->name))
 			func->fentry = true;
@@ -2678,12 +2698,17 @@ static int decode_sections(struct objtool_file *file) return 0; }
-static bool is_fentry_call(struct instruction *insn)
+static bool is_special_call(struct instruction *insn)
 {
-	if (insn->type == INSN_CALL &&
-	    insn_call_dest(insn) &&
-	    insn_call_dest(insn)->fentry)
-		return true;
+	if (insn->type == INSN_CALL) {
+		struct symbol *dest = insn_call_dest(insn);
+
+		if (!dest)
+			return false;
+
+		if (dest->fentry || dest->embedded_insn)
+			return true;
+	}
return false; } @@ -3681,7 +3706,7 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, if (ret) return ret;
- if (opts.stackval && func && !is_fentry_call(insn) && + if (opts.stackval && func && !is_special_call(insn) && !has_valid_stack_frame(&state)) { WARN_INSN(insn, "call without frame pointer save/setup"); return 1; diff --git a/tools/objtool/include/objtool/arch.h b/tools/objtool/include/objtool/arch.h index 2b6d2ce4f9a5..0b303eba660e 100644 --- a/tools/objtool/include/objtool/arch.h +++ b/tools/objtool/include/objtool/arch.h @@ -90,6 +90,7 @@ int arch_decode_hint_reg(u8 sp_reg, int *base);
bool arch_is_retpoline(struct symbol *sym); bool arch_is_rethunk(struct symbol *sym); +bool arch_is_embedded_insn(struct symbol *sym);
int arch_rewrite_retpolines(struct objtool_file *file);
diff --git a/tools/objtool/include/objtool/elf.h b/tools/objtool/include/objtool/elf.h index e1ca588eb69d..bfb4a69d0e91 100644 --- a/tools/objtool/include/objtool/elf.h +++ b/tools/objtool/include/objtool/elf.h @@ -61,6 +61,7 @@ struct symbol { u8 return_thunk : 1; u8 fentry : 1; u8 profiling_func : 1; + u8 embedded_insn : 1; struct list_head pv_target; struct list_head reloc_list; }; diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 9e02e19c1b7a..4d564e0698df 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -44,7 +44,6 @@ #include <linux/zalloc.h>
static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock); -static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip);
static struct dso *machine__kernel_dso(struct machine *machine) { @@ -2371,10 +2370,6 @@ static int add_callchain_ip(struct thread *thread, ms.maps = al.maps; ms.map = al.map; ms.sym = al.sym; - - if (!branch && append_inlines(cursor, &ms, ip) == 0) - return 0; - srcline = callchain_srcline(&ms, al.addr); err = callchain_cursor_append(cursor, ip, &ms, branch, flags, nr_loop_iter, diff --git a/tools/perf/util/thread-stack.c b/tools/perf/util/thread-stack.c index 4b85c1728012..e72bd059538c 100644 --- a/tools/perf/util/thread-stack.c +++ b/tools/perf/util/thread-stack.c @@ -1037,9 +1037,7 @@ static int thread_stack__trace_end(struct thread_stack *ts,
 static bool is_x86_retpoline(const char *name)
 {
-	const char *p = strstr(name, "__x86_indirect_thunk_");
-
-	return p == name || !strcmp(name, "__indirect_thunk_start");
+	return strstr(name, "__x86_indirect_thunk_") == name;
 }
 /*
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
index aff88f78e339..5ea9d63915f7 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
@@ -72,7 +72,8 @@ test_span_gre_ttl()

 	RET=0

-	mirror_install $swp1 ingress $tundev "matchall $tcflags"
+	mirror_install $swp1 ingress $tundev \
+		"prot ip flower $tcflags ip_prot icmp"
 	tc filter add dev $h3 ingress pref 77 prot $prot \
 		flower skip_hw ip_ttl 50 action pass
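The helper still splices this string into a tc mirror rule, so the installed filter now matches only IP/ICMP instead of matchall. Modulo the helper's own preference handling (not shown in this patch), the resulting rule is roughly:

    tc filter add dev $swp1 ingress prot ip flower $tcflags ip_prot icmp \
        action mirred egress mirror dev $tundev

which keeps unrelated traffic from being mirrored into the counter installed on $h3.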