I'm announcing the release of the 6.10.5 kernel.
All users of the 6.10 kernel series must upgrade.
The updated 6.10.y git tree can be found at:
	git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git linux-6.10.y
and can be browsed at the normal kernel.org git web browser:
	https://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git;a=summa...
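If you track the stable tree with git, one way to pick up this release is to fetch the branch and rebuild; the commands below are only an illustration (they assume an existing clone and an already-configured kernel build), not part of the official announcement:

	git fetch git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git linux-6.10.y
	git checkout FETCH_HEAD
	make olddefconfig
	make -j$(nproc)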
thanks,
greg k-h
------------
 Documentation/admin-guide/cifs/usage.rst | 2
 Documentation/admin-guide/kernel-parameters.txt | 4
 Documentation/arch/arm64/silicon-errata.rst | 34 +
 Documentation/hwmon/corsair-psu.rst | 6
 Documentation/userspace-api/media/v4l/pixfmt-yuv-luma.rst | 4
 Makefile | 2
 arch/arm64/Kconfig | 62 +-
 arch/arm64/boot/dts/ti/k3-am62-verdin-dahlia.dtsi | 22
 arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi | 6
 arch/arm64/include/asm/cpucaps.h | 2
 arch/arm64/include/asm/cputype.h | 10
 arch/arm64/kernel/cpu_errata.c | 26 -
 arch/arm64/kernel/proton-pack.c | 2
 arch/loongarch/kernel/efi.c | 6
 arch/parisc/Kconfig | 1
 arch/parisc/include/asm/cache.h | 11
 arch/parisc/net/bpf_jit_core.c | 2
 arch/x86/events/amd/core.c | 28 -
 arch/x86/events/amd/uncore.c | 8
 arch/x86/events/core.c | 116 ++--
 arch/x86/events/intel/core.c | 164 +++---
 arch/x86/events/intel/cstate.c | 35 +
 arch/x86/events/intel/ds.c | 34 -
 arch/x86/events/intel/knc.c | 2
 arch/x86/events/intel/p4.c | 10
 arch/x86/events/intel/p6.c | 2
 arch/x86/events/perf_event.h | 62 ++
 arch/x86/events/zhaoxin/core.c | 12
 arch/x86/include/asm/intel_ds.h | 1
 arch/x86/include/asm/qspinlock.h | 12
 arch/x86/kernel/cpu/mtrr/mtrr.c | 2
 arch/x86/kernel/paravirt.c | 7
 arch/x86/mm/pti.c | 8
 drivers/acpi/battery.c | 16
 drivers/acpi/resource.c | 14
 drivers/acpi/sbs.c | 23
 drivers/base/core.c | 13
 drivers/base/module.c | 4
 drivers/base/regmap/regmap-kunit.c | 72 +-
 drivers/bluetooth/btnxpuart.c | 2
 drivers/clocksource/sh_cmt.c | 13
 drivers/cpufreq/amd-pstate.c | 32 -
 drivers/cpufreq/amd-pstate.h | 1
 drivers/gpio/gpiolib.c | 3
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 3
 drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c | 2
 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 7
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 6
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 2
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 5
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 9
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 9
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 247 ++++++----
 drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 58 +-
 drivers/gpu/drm/amd/display/dc/core/dc_state.c | 67 +-
 drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c | 9
 drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c | 49 +
 drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c | 3
 drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c | 5
 drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 3
 drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c | 3
 drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c | 9
 drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c | 8
 drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c | 8
 drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c | 57 +-
 drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c | 14
 drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c | 36 +
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 16
 drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c | 5
 drivers/gpu/drm/display/drm_dp_mst_topology.c | 11
 drivers/gpu/drm/drm_atomic_uapi.c | 15
 drivers/gpu/drm/drm_client_modeset.c | 5
 drivers/gpu/drm/i915/display/intel_backlight.c | 3
 drivers/gpu/drm/i915/display/intel_pps.c | 3
 drivers/gpu/drm/i915/gem/i915_gem_mman.c | 55 +-
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 13
 drivers/gpu/drm/lima/lima_drv.c | 1
 drivers/gpu/drm/mgag200/mgag200_i2c.c | 8
 drivers/gpu/drm/radeon/pptable.h | 2
 drivers/gpu/drm/tests/drm_gem_shmem_test.c | 11
 drivers/gpu/drm/xe/regs/xe_engine_regs.h | 4
 drivers/gpu/drm/xe/xe_guc_submit.c | 2
 drivers/gpu/drm/xe/xe_hwmon.c | 3
 drivers/gpu/drm/xe/xe_lrc.c | 17
 drivers/gpu/drm/xe/xe_preempt_fence.c | 14
 drivers/gpu/drm/xe/xe_rtp.c | 2
 drivers/gpu/drm/xe/xe_sync.c | 2
 drivers/hwmon/corsair-psu.c | 7
 drivers/i2c/busses/i2c-qcom-geni.c | 5
 drivers/i2c/i2c-smbus.c | 64 ++
 drivers/irqchip/irq-loongarch-cpu.c | 6
 drivers/irqchip/irq-mbigen.c | 20
 drivers/irqchip/irq-meson-gpio.c | 14
 drivers/irqchip/irq-riscv-aplic-msi.c | 32 +
 drivers/irqchip/irq-xilinx-intc.c | 2
 drivers/md/md.c | 15
 drivers/md/md.h | 2
 drivers/md/raid1.c | 3
 drivers/md/raid10.c | 3
 drivers/md/raid5.c | 23
 drivers/media/i2c/ov5647.c | 11
 drivers/media/pci/intel/ipu6/Kconfig | 3
 drivers/media/platform/amphion/vdec.c | 2
 drivers/media/platform/amphion/venc.c | 2
 drivers/media/tuners/xc2028.c | 9
 drivers/media/usb/uvc/uvc_video.c | 37 +
 drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c | 2
 drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c | 125 ++---
 drivers/net/can/spi/mcp251xfd/mcp251xfd.h | 13
 drivers/net/dsa/bcm_sf2.c | 4
 drivers/net/dsa/microchip/ksz_common.c | 16
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 13
 drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | 14
 drivers/net/ethernet/freescale/fec_ptp.c | 3
 drivers/net/ethernet/google/gve/gve_ethtool.c | 2
 drivers/net/ethernet/google/gve/gve_main.c | 12
 drivers/net/ethernet/intel/ice/ice_main.c | 2
 drivers/net/ethernet/intel/idpf/idpf_lib.c | 48 -
 drivers/net/ethernet/intel/idpf/idpf_txrx.c | 43 -
 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 3
 drivers/net/ethernet/mellanox/mlxsw/pci.c | 6
 drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c | 23
 drivers/net/pse-pd/tps23881.c | 5
 drivers/net/usb/qmi_wwan.c | 1
 drivers/net/virtio_net.c | 8
 drivers/net/wireless/ath/ath12k/dp_rx.c | 1
 drivers/net/wireless/ath/ath12k/pci.c | 4
 drivers/net/wireless/realtek/rtlwifi/usb.c | 34 +
 drivers/net/wireless/realtek/rtw89/pci.c | 13
 drivers/nvme/host/apple.c | 27 -
 drivers/nvme/host/pci.c | 6
 drivers/platform/chrome/cros_ec_lpc.c | 50 +-
 drivers/platform/x86/intel/ifs/runtest.c | 2
 drivers/platform/x86/intel/vbtn.c | 9
 drivers/power/supply/axp288_charger.c | 22
 drivers/power/supply/qcom_battmgr.c | 8
 drivers/power/supply/rt5033_battery.c | 1
 drivers/s390/char/sclp_sd.c | 10
 drivers/scsi/mpi3mr/mpi3mr_os.c | 11
 drivers/scsi/mpt3sas/mpt3sas_base.c | 20
 drivers/scsi/sd.c | 5
 drivers/soc/qcom/icc-bwmon.c | 12
 drivers/spi/spi-fsl-lpspi.c | 6
 drivers/spi/spidev.c | 1
 drivers/spmi/spmi-pmic-arb.c | 11
 drivers/thermal/intel/intel_hfi.c | 30 -
 drivers/tty/serial/sc16is7xx.c | 25 -
 drivers/tty/serial/serial_core.c | 8
 drivers/tty/vt/conmakehash.c | 20
 drivers/ufs/core/ufshcd-priv.h | 5
 drivers/ufs/core/ufshcd.c | 19
 drivers/usb/gadget/function/f_fs.c | 6
 drivers/usb/gadget/function/f_midi2.c | 21
 drivers/usb/gadget/function/u_audio.c | 42 +
 drivers/usb/gadget/function/u_serial.c | 1
 drivers/usb/gadget/udc/core.c | 10
 drivers/usb/serial/usb_debug.c | 7
 drivers/usb/typec/mux/fsa4480.c | 14
 drivers/usb/usbip/vhci_hcd.c | 9
 drivers/vhost/vdpa.c | 8
 drivers/xen/privcmd.c | 25 -
 fs/btrfs/ctree.c | 57 +-
 fs/btrfs/ctree.h | 11
 fs/btrfs/defrag.c | 2
 fs/btrfs/disk-io.c | 4
 fs/btrfs/extent-tree.c | 46 -
 fs/btrfs/extent-tree.h | 8
 fs/btrfs/extent_io.c | 4
 fs/btrfs/file.c | 60 +-
 fs/btrfs/free-space-cache.c | 1
 fs/btrfs/free-space-tree.c | 10
 fs/btrfs/ioctl.c | 6
 fs/btrfs/print-tree.c | 2
 fs/btrfs/qgroup.c | 6
 fs/btrfs/relocation.c | 8
 fs/btrfs/transaction.c | 8
 fs/buffer.c | 2
 fs/ext4/inline.c | 6
 fs/ext4/inode.c | 5
 fs/jbd2/journal.c | 1
 fs/nfsd/nfsctl.c | 3
 fs/smb/client/cifs_debug.c | 2
 fs/smb/client/cifsglob.h | 8
 fs/smb/client/inode.c | 17
 fs/smb/client/misc.c | 9
 fs/smb/client/reparse.c | 4
 fs/smb/client/reparse.h | 19
 fs/smb/client/smb2inode.c | 2
 fs/smb/client/smb2pdu.c | 3
 fs/tracefs/event_inode.c | 4
 fs/tracefs/inode.c | 12
 fs/tracefs/internal.h | 5
 fs/udf/balloc.c | 36 -
 include/linux/blk-integrity.h | 16
 include/linux/fs.h | 2
 include/linux/pci_ids.h | 2
 include/linux/profile.h | 1
 include/linux/rcupdate.h | 2
 include/linux/trace_events.h | 1
 include/linux/virtio_net.h | 16
 include/sound/cs35l56.h | 14
 io_uring/net.c | 7
 kernel/bpf/verifier.c | 17
 kernel/irq/irqdesc.c | 1
 kernel/jump_label.c | 4
 kernel/kcov.c | 15
 kernel/kprobes.c | 4
 kernel/locking/qspinlock_paravirt.h | 2
 kernel/module/main.c | 41 +
 kernel/padata.c | 7
 kernel/pid_namespace.c | 17
 kernel/profile.c | 11
 kernel/rcu/rcutorture.c | 2
 kernel/rcu/tasks.h | 16
 kernel/rcu/tree.c | 10
 kernel/sched/core.c | 68 +-
 kernel/sched/cputime.c | 6
 kernel/sched/stats.c | 10
 kernel/time/clocksource.c | 2
 kernel/time/ntp.c | 9
 kernel/time/tick-broadcast.c | 3
 kernel/time/timekeeping.c | 2
 kernel/trace/trace.h | 23
 kernel/trace/trace_events.c | 33 -
 kernel/trace/trace_events_hist.c | 4
 kernel/trace/trace_events_inject.c | 2
 kernel/trace/trace_events_trigger.c | 6
 kernel/trace/tracing_map.c | 6
 lib/debugobjects.c | 21
 mm/list_lru.c | 28 -
 mm/memcontrol.c | 22
 mm/slub.c | 3
 net/bluetooth/hci_sync.c | 14
 net/bluetooth/l2cap_core.c | 1
 net/bridge/br_multicast.c | 4
 net/core/link_watch.c | 4
 net/ipv4/tcp_ao.c | 43 +
 net/ipv4/tcp_offload.c | 3
 net/ipv4/udp_offload.c | 4
 net/l2tp/l2tp_core.c | 15
 net/mac80211/agg-tx.c | 4
 net/mptcp/options.c | 3
 net/mptcp/pm_netlink.c | 47 +
 net/sctp/input.c | 19
 net/smc/smc_stats.h | 2
 net/sunrpc/sched.c | 4
 net/unix/af_unix.c | 34 -
 net/wireless/nl80211.c | 37 +
 sound/pci/hda/patch_hdmi.c | 2
 sound/pci/hda/patch_realtek.c | 1
 sound/soc/amd/yc/acp6x-mach.c | 7
 sound/soc/codecs/cs-amp-lib.c | 2
 sound/soc/codecs/cs35l56-sdw.c | 77 +++
 sound/soc/codecs/cs35l56-shared.c | 101 ----
 sound/soc/codecs/cs35l56.c | 205 --------
 sound/soc/codecs/cs35l56.h | 1
 sound/soc/codecs/wcd938x-sdw.c | 4
 sound/soc/codecs/wcd939x-sdw.c | 4
 sound/soc/codecs/wsa881x.c | 2
 sound/soc/codecs/wsa883x.c | 10
 sound/soc/codecs/wsa884x.c | 10
 sound/soc/meson/axg-fifo.c | 26 -
 sound/soc/sof/mediatek/mt8195/mt8195.c | 2
 sound/soc/sti/sti_uniperif.c | 2
 sound/soc/sti/uniperif.h | 1
 sound/soc/sti/uniperif_player.c | 1
 sound/soc/sti/uniperif_reader.c | 1
 sound/usb/line6/driver.c | 5
 sound/usb/quirks-table.h | 4
 tools/testing/selftests/bpf/prog_tests/send_signal.c | 3
 tools/testing/selftests/devices/ksft.py | 2
 tools/testing/selftests/mm/Makefile | 2
 tools/testing/selftests/net/mptcp/mptcp_join.sh | 55 +-
 274 files changed, 2688 insertions(+), 1723 deletions(-)
Abdulrasaq Lawani (1): media: i2c: ov5647: replacing of_node_put with __free(device_node)
Alex Hung (1): drm/amd/display: Add null checker before passing variables
Alexander Lobakin (2):
      idpf: fix memory leaks and crashes while performing a soft reset
      idpf: fix UAFs when destroying the queues
Andi Kleen (1): x86/mtrr: Check if fixed MTRRs exist before saving them
Andi Shyti (2):
      drm/i915/gem: Fix Virtual Memory mapping boundaries calculation
      drm/i915/gem: Adjust vma offset for framebuffer mmap offset
Andrey Konovalov (1): kcov: properly check for softirq context
Anton Khirnov (1): Bluetooth: hci_sync: avoid dup filtering when passive scanning with adv monitor
Arnd Bergmann (2):
      net: pse-pd: tps23881: include missing bitfield.h header
      media: ipu-bridge: fix ipu6 Kconfig dependencies
Arseniy Krasnov (1): irqchip/meson-gpio: Convert meson_gpio_irq_controller::lock to 'raw_spinlock_t'
Aurabindo Pillai (1): drm/amd/display: Fix null pointer deref in dcn20_resource.c
Baochen Qiang (2):
      wifi: ath12k: fix race due to setting ATH12K_FLAG_EXT_IRQ_ENABLED too early
      wifi: ath12k: fix memory leak in ath12k_dp_rx_peer_frag_setup()
Bartosz Golaszewski (1): net: stmmac: qcom-ethqos: enable SGMII loopback during DMA reset on sa8775p-ride-r3
Ben Walsh (1): platform/chrome: cros_ec_lpc: Add a new quirk for ACPI id
Benjamin Coddington (1): SUNRPC: Fix a race to wake a sync task
Bill Wendling (1): drm/radeon: Remove __counted_by from StateArray.states[]
Bingbu Cao (1): media: intel/ipu6: select AUXILIARY_BUS in Kconfig
Bob Zhou (1): drm/amd/pm: Fix the null pointer dereference for vega10_hwmgr
Breno Leitao (1): debugobjects: Annotate racy debug variables
Chen Yu (1): x86/paravirt: Fix incorrect virt spinlock setting on bare metal
Chi Zhiling (1): media: xc2028: avoid use-after-free in load_firmware_cb()
Chris Wulff (2):
      usb: gadget: core: Check for unset descriptor
      usb: gadget: u_audio: Check return codes from usb_ep_enable and config_ep_by_speed.
Csókás, Bence (1): net: fec: Stop PPS on driver remove
Curtis Malainey (1): ASoC: SOF: Remove libraries from topology lookups
Damien Le Moal (2):
      scsi: mpt3sas: Avoid IOMMU page faults on REPORT ZONES
      scsi: mpi3mr: Avoid IOMMU page faults on REPORT ZONES
Dan Williams (1): driver core: Fix uevent_show() vs driver detach race
Daniele Palmas (1): net: usb: qmi_wwan: fix memory leak for not ip packets
Dave Airlie (1): drm/test: fix the gem shmem test to map the sg table.
David Collins (1): spmi: pmic-arb: add missing newline in dev_err format strings
David Gow (2):
      drm/i915: Allow evicting to use the requested placement
      drm/i915: Attempt to get pages without eviction first
Dmitry Antipov (1): Bluetooth: l2cap: always unlock channel in l2cap_conless_channel()
Dmitry Safonov (1): net/tcp: Disable TCP-AO static key after RCU grace period
Dnyaneshwar Bhadane (1): drm/i915/display: correct dual pps handling for MTL_PCH+
Dragan Simic (1): drm/lima: Mark simple_ondemand governor as softdep
Dragos Tatulea (1): net/mlx5e: SHAMPO, Fix invalid WQ linked list unlink
Dustin L. Howett (1): ALSA: hda/realtek: Add Framework Laptop 13 (Intel Core Ultra) to quirks
Eric Dumazet (1): net: linkwatch: use system_unbound_wq
FUJITA Tomonori (1): PCI: Add Edimax Vendor ID to pci_ids.h
Fangzhi Zuo (1): drm/amd/display: Skip Recompute DSC Params if no Stream on Link
Filipe Manana (6):
      btrfs: do not BUG_ON() when freeing tree block after error
      btrfs: reduce nesting for extent processing at btrfs_lookup_extent_info()
      btrfs: fix data race when accessing the last_trans field of a root
      btrfs: fix bitmap leak when loading free space cache on duplicate entry
      btrfs: fix corruption after buffer fault in during direct IO append write
      btrfs: fix double inode unlock for direct IO sync writes
Florian Fainelli (1): net: bcmgenet: Properly overlay PHY and MAC Wake-on-LAN capabilities
Francesco Dolcini (1): arm64: dts: ti: k3-am62-verdin-dahlia: Keep CTRL_SLEEP_MOCI# regulator on
Frederic Weisbecker (2):
      Revert "rcu-tasks: Fix synchronize_rcu_tasks() VS zap_pid_ns_processes()"
      rcu: Fix rcu_barrier() VS post CPUHP_TEARDOWN_CPU invocation
Gaosheng Cui (2):
      i2c: qcom-geni: Add missing clk_disable_unprepare in geni_i2c_runtime_resume
      i2c: qcom-geni: Add missing geni_icc_disable in geni_i2c_runtime_resume
Geert Uytterhoeven (1): spi: spidev: Add missing spi_device_id for bh2228fv
George Kennedy (1): serial: core: check uartclk for zero to avoid divide by zero
Gleb Korobeynikov (1): cifs: cifs_inval_name_dfs_link_error: correct the check for fullpath
Greg Kroah-Hartman (1): Linux 6.10.5
Grzegorz Nitka (1): ice: Fix reset handler
Guenter Roeck (2):
      i2c: smbus: Improve handling of stuck alerts
      i2c: smbus: Send alert notifications to all devices if source not found
Hagar Hemdan (1): gpio: prevent potential speculation leaks in gpio_device_get_desc()
Hans de Goede (3):
      platform/x86: intel-vbtn: Protect ACPI notify handler against recursion
      power: supply: axp288_charger: Fix constant_charge_voltage writes
      power: supply: axp288_charger: Round constant_charge_voltage writes down
Heng Qi (1): virtio-net: unbreak vq resizing when coalescing is not negotiated
Huacai Chen (1): irqchip/loongarch-cpu: Fix return value of lpic_gsi_to_irq()
Hugo Villeneuve (2):
      serial: sc16is7xx: fix TX fifo corruption
      serial: sc16is7xx: fix invalid FIFO access with special register set
Ido Schimmel (1): mlxsw: pci: Lock configuration space of upstream bridge during reset
Ivan Lipski (1): Revert "drm/amd/display: Add NULL check for 'afb' before dereferencing in amdgpu_dm_plane_handle_cursor_update"
James Chapman (1): l2tp: fix lockdep splat
Jason Wang (1): vhost-vdpa: switch to use vmf_insert_pfn() in the fault handler
Jean-Michel Hautbois (1): media: v4l: Fix missing tabular column hint for Y14P format
Jeff Layton (1): nfsd: don't set SVC_SOCK_ANONYMOUS when creating nfsd sockets
Jens Axboe (4):
      io_uring/net: ensure expanded bundle recv gets marked for cleanup
      io_uring/net: ensure expanded bundle send gets marked for cleanup
      io_uring/net: don't pick multiple buffers for non-bundle send
      block: use the right type for stub rq_integrity_vec()
Jerome Audu (1): ASoC: sti: add missing probe entry for player and reader
Jerome Brunet (1): ASoC: meson: axg-fifo: fix irq scheduling issue with PREEMPT_RT
Jesse Zhang (1): drm/admgpu: fix dereferencing null pointer context
Joe Hattori (1): net: dsa: bcm_sf2: Fix a possible memory leak in bcm_sf2_mdio_register()
Johan Hovold (1): scsi: Revert "scsi: sd: Do not repeat the starting disk message"
Johannes Berg (2):
      wifi: nl80211: disallow setting special AP channel widths
      wifi: nl80211: don't give key data to userspace
Jonathan Cavitt (1): drm/xe/xe_guc_submit: Fix exec queue stop race condition
Joshua Ashton (1): drm/amdgpu: Forward soft recovery errors to userspace
Justin Stitt (2):
      ntp: Clamp maxerror and esterror to operating range
      ntp: Safeguard against time_constant overflow
Kan Liang (2):
      perf/x86/intel: Support the PEBS event mask
      perf/x86: Support counter mask
Karthik Poosa (1): drm/xe/hwmon: Fix PL1 disable flow in xe_hwmon_power_max_write
Keith Busch (1): nvme: apple: fix device reference counting
Kemeng Shi (1): jbd2: avoid memleak in jbd2_journal_write_metadata_buffer
Konrad Dybcio (2):
      usb: typec: fsa4480: Check if the chip is really there
      spmi: pmic-arb: Pass the correct of_node to irq_domain_add_tree
Krzysztof Kozlowski (5):
      ASoC: codecs: wcd938x-sdw: Correct Soundwire ports mask
      ASoC: codecs: wcd939x-sdw: Correct Soundwire ports mask
      ASoC: codecs: wsa881x: Correct Soundwire ports mask
      ASoC: codecs: wsa883x: Correct Soundwire ports mask
      ASoC: codecs: wsa884x: Correct Soundwire ports mask
Kuniyuki Iwashima (2):
      sctp: Fix null-ptr-deref in reuseport_add_sock().
      af_unix: Don't retry after unix_state_lock_nested() in unix_stream_connect().
Kuppuswamy Sathyanarayanan (1): platform/x86/intel/ifs: Initialize union ifs_status to zero
Kyle Swenson (1): net: pse-pd: tps23881: Fix the device ID check
Laura Nao (1): selftests: ksft: Fix finished() helper exit code on skipped tests
Li Huafei (1): perf/x86: Fix smp_processor_id()-in-preemptible warnings
Li Nan (2):
      md: do not delete safemode_timer in mddev_suspend
      md: change the return value type of md_write_start to void
Linus Torvalds (2):
      module: warn about excessively long module waits
      module: make waiting for a concurrent module loader interruptible
Lucas De Marchi (1): drm/xe/rtp: Fix off-by-one when processing rules
Lucas Stach (1): drm/bridge: analogix_dp: properly handle zero sized AUX transactions
Luke Wang (1): Bluetooth: btnxpuart: Shutdown timer and prevent rearming when driver unloading
Ma Jun (4):
      drm/amdgpu/pm: Fix the param type of set_power_profile_mode
      drm/amdgpu/pm: Fix the null pointer dereference for smu7
      drm/amdgpu: Fix the null pointer dereference to ras_manager
      drm/amdgpu/pm: Fix the null pointer dereference in apply_state_adjust_rules
Ma Ke (1): drm/client: fix null pointer dereference in drm_client_modeset_probe
Manivannan Sadhasivam (1): scsi: ufs: core: Do not set link to OFF state while waking up from hibernation
Marc Kleine-Budde (2):
      can: mcp251xfd: tef: prepare to workaround broken TEF FIFO tail index erratum
      can: mcp251xfd: tef: update workaround for erratum DS80000789E 6 of mcp2518fd
Marek Marczykowski-Górecki (1): USB: serial: debug: do not echo input by default
Mario Limonciello (1): cpufreq: amd-pstate: Allow users to write 'default' EPP string
Mark Rutland (8):
      arm64: cputype: Add Cortex-X3 definitions
      arm64: cputype: Add Cortex-A720 definitions
      arm64: cputype: Add Cortex-X925 definitions
      arm64: errata: Unify speculative SSBS errata logic
      arm64: errata: Expand speculative SSBS workaround
      arm64: cputype: Add Cortex-X1C definitions
      arm64: cputype: Add Cortex-A725 definitions
      arm64: errata: Expand speculative SSBS workaround (again)
Martin Whitaker (1): net: dsa: microchip: disable EEE for KSZ8567/KSZ9567/KSZ9896/KSZ9897.
Masami Hiramatsu (Google) (1): kprobes: Fix to check symbol prefixes correctly
Mathias Krause (3):
      tracefs: Fix inode allocation
      eventfs: Don't return NULL in eventfs_create_dir()
      eventfs: Use SRCU for freeing eventfs_inodes
Matt Bobrowski (1): bpf: add missing check_func_arg_reg_off() to prevent out-of-bounds memory accesses
Matthew Auld (1): drm/xe/preempt_fence: enlarge the fence critical section
Matthew Brost (2):
      drm/xe: Use dma_fence_chain_free in chain fence unused as a sync
      drm/xe: Take ref to VM in delayed snapshot
Matthieu Baerts (NGI0) (7):
      mptcp: fully established after ADD_ADDR echo on MPJ
      mptcp: pm: deny endp with signal + subflow + port
      mptcp: pm: reduce indentation blocks
      mptcp: pm: don't try to create sf if alloc failed
      mptcp: pm: do not ignore 'subflow' if 'signal' flag is also set
      selftests: mptcp: join: ability to invert ADD_ADDR check
      selftests: mptcp: join: test both signal & subflow
Max Krummenacher (1): tty: vt: conmakehash: cope with abs_srctree no longer in env
Menglong Dong (1): bpf: kprobe: remove unused declaring of bpf_kprobe_override
Miao Wang (1): LoongArch: Enable general EFI poweroff method
Michael Chan (1): bnxt_en : Fix memory out-of-bounds in bnxt_fill_hw_rss_tbl()
Michael Strauss (1): drm/amd/display: Add delay to improve LTTPR UHBR interop
Michal Kubiak (1): idpf: fix memleak in vport interrupt configuration
Michal Pecio (1): media: uvcvideo: Fix the bandwdith quirk on USB 3.x
Mikulas Patocka (3):
      block: change rq_integrity_vec to respect the iterator
      parisc: fix unaligned accesses in BPF
      parisc: fix a possible DMA corruption
Ming Qian (1): media: amphion: Remove lock in s_ctrl callback
Muchun Song (1): mm: list_lru: fix UAF for memory cgroup
Natanel Roizenman (1): drm/amd/display: Add null check in resource_log_pipe_topology_update
Neil Armstrong (1): power: supply: qcom_battmgr: return EAGAIN when firmware service is not up
Nicholas Kazlauskas (1): drm/amd/display: Wake DMCUB before sending a command for replay feature
Nico Pache (1): selftests: mm: add s390 to ARCH check
Nikita Travkin (1): power: supply: rt5033: Bring back i2c_set_clientdata
Niklas Söderlund (1): clocksource/drivers/sh_cmt: Address race condition for clock events
Nikolay Aleksandrov (1): net: bridge: mcast: wait for previous gc cycles when removing port
Niranjana Vishwanathapura (1): drm/xe: Minor cleanup in LRC handling
Oliver Neukum (1): usb: vhci-hcd: Do not drop references before new references are gained
Paul E. McKenney (2):
      rcutorture: Fix rcu_torture_fwd_cb_cr() data race
      clocksource: Fix brown-bag boolean thinko in cs_watchdog_read()
Paulo Alcantara (1): smb: client: handle lack of FSCTL_GET_REPARSE_POINT support
Perry Yuan (1): cpufreq: amd-pstate: auto-load pstate driver by default
Peter Oberparleiter (1): s390/sclp: Prevent release of buffer in I/O
Peter Wang (1): scsi: ufs: core: Fix deadlock during RTC update
Peter Zijlstra (3):
      jump_label: Fix the fix, brown paper bags galore
      x86/mm: Fix pti_clone_pgtable() alignment assumption
      x86/mm: Fix pti_clone_entry_text() for i386
Ping-Ke Shih (2):
      wifi: rtlwifi: handle return value of usb init TX/RX
      wifi: rtw89: pci: fix RX tag race condition resulting in wrong RX length
Prashanth K (1): usb: gadget: u_serial: Set start_delayed during suspend
Praveen Kaligineedi (1): gve: Fix use of netif_carrier_ok()
Qu Wenruo (2):
      btrfs: do not clear page dirty inside extent_write_locked_range()
      btrfs: avoid using fixed char array size for tree names
Radhey Shyam Pandey (1): irqchip/xilinx: Fix shift out of bounds
Ramesh Errabolu (1): drm/amd/amdkfd: Fix a resource leak in svm_range_validate_and_map()
Ricardo Ribalda (1): media: uvcvideo: Ignore empty TS packets
Richard Fitzgerald (4):
      regmap: kunit: Fix memory leaks in gen_regmap() and gen_raw_regmap()
      ASoC: cs-amp-lib: Fix NULL pointer crash if efi.get_variable is NULL
      ASoC: cs35l56: Revert support for dual-ownership of ASP registers
      ASoC: cs35l56: Handle OTP read latency over SoundWire
Rik van Riel (1): mm, slub: do not call do_slab_free for kfence object
Rodrigo Siqueira (2):
      drm/amd/display: Fix NULL pointer dereference for DTN log in DCN401
      drm/amd/display: Replace dm_execute_dmub_cmd with dc_wake_and_execute_dmub_cmd
Roman Smirnov (1): udf: prevent integer overflow in udf_bitmap_free_blocks()
Shakeel Butt (1): memcg: protect concurrent access to mem_cgroup_idr
Shay Drory (1): genirq/irqdesc: Honor caller provided affinity in alloc_desc()
Sibi Sankar (1): soc: qcom: icc-bwmon: Allow for interrupts to be shared across instances
Simon Ser (1): drm/atomic: allow no-op FB_ID updates for async flips
Srinivas Kandagatla (2):
      ASoC: codecs: wsa883x: parse port-mapping information
      ASoC: codecs: wsa884x: parse port-mapping information
Srinivasan Shanmugam (2):
      drm/amd/display: Add null checks for 'stream' and 'plane' before dereferencing
      drm/amd/display: Add NULL check for 'afb' before dereferencing in amdgpu_dm_plane_handle_cursor_update
Stefan Wahren (1): spi: spi-fsl-lpspi: Fix scldiv calculation
Steve French (1): smb3: fix setting SecurityFlags when encryption is required
Steven 'Steve' Kendall (1): ALSA: hda: Add HP MP9 G4 Retail System AMS to force connect list
Steven Rostedt (2):
      tracefs: Use generic inode RCU for synchronizing freeing
      tracing: Have format file honor EVENT_FILE_FL_FREED
Sung-huai Wang (2):
      drm/amd/display: Handle HPD_IRQ for internal link
      Revert "drm/amd/display: Handle HPD_IRQ for internal link"
Swapnil Patel (1): drm/amd/display: Change ASSR disable sequence
Takashi Iwai (5):
      ALSA: usb-audio: Re-add ScratchAmp quirk entries
      ALSA: line6: Fix racy access to midibuf
      ALSA: hda/hdmi: Yet more pin fix for HP EliteDesk 800 G4
      usb: gadget: midi2: Fix the response for FB info with block 0xff
      ASoC: amd: yc: Add quirk entry for OMEN by HP Gaming Laptop 16-n0xxx
Tamim Khan (2):
      ACPI: resource: Skip IRQ override on Asus Vivobook Pro N6506MU
      ACPI: resource: Skip IRQ override on Asus Vivobook Pro N6506MJ
Tetsuo Handa (1): profiling: remove profile=sleep support
Thomas Gleixner (2):
      tick/broadcast: Move per CPU pointer access into the atomic section
      timekeeping: Fix bogus clock_was_set() invocation in do_adjtimex()
Thomas Weißschuh (2):
      ACPI: battery: create alarm sysfs attribute atomically
      ACPI: SBS: manage alarm sysfs attribute through psy core
Thomas Zimmermann (2):
      drm/mgag200: Set DDC timeout in milliseconds
      drm/mgag200: Bind I2C lifetime to DRM device
Tim Huang (1): drm/amdgpu: fix potential resource leak warning
Tristram Ha (1): net: dsa: microchip: Fix Wake-on-LAN check to not return an error
Tudor Ambarus (1): usb: gadget: f_fs: restore ffs_func_disable() functionality
Tze-nan Wu (1): tracing: Fix overflow in get_free_elt()
Uros Bizjak (2):
      locking/pvqspinlock: Correct the type of "old" variable in pv_kick_node()
      perf/x86/amd: Use try_cmpxchg() in events/amd/{un,}core.c
Vamshi Gajjela (1): scsi: ufs: core: Fix hba->last_dme_cmd_tstamp timestamp updating logic
Victor Skvortsov (1): drm/amdgpu: Add lock around VF RLCG interface
Viresh Kumar (1): xen: privcmd: Switch from mutex to spinlock for irqfds
Waiman Long (1): padata: Fix possible divide-by-0 panic in padata_mt_helper()
Wayne Lin (3):
      drm/amd/display: Refactor function dm_dp_mst_is_port_support_mode()
      drm/amd/display: Don't refer to dc_sink in is_dsc_need_re_compute
      drm/dp_mst: Skip CSN if topology probing is not done yet
Wenjing Liu (2):
      drm/amd/display: reduce ODM slice count to initial new dc state only when needed
      drm/amd/display: remove dpp pipes on failure to update pipe params
Wilken Gottwalt (1): hwmon: corsair-psu: add USB id of HX1200i Series 2023 psu
Willem de Bruijn (1): net: drop bad gso csum_start and offset in virtio_net_hdr
Wojciech Gładysz (1): ext4: sanity check for NULL pointer after ext4_force_shutdown
Xiaxi Shen (1): ext4: fix uninitialized variable in ext4_inlinedir_to_tree
Yang Yingliang (4):
      sched/smt: Introduce sched_smt_present_inc/dec() helper
      sched/smt: Fix unbalance sched_smt_present dec/inc
      sched/core: Introduce sched_set_rq_on/offline() helper
      sched/core: Fix unbalance set_rq_online/offline() in sched_cpu_deactivate()
Yipeng Zou (1): irqchip/mbigen: Fix mbigen node address layout
Yong-Xuan Wang (1): irqchip/riscv-aplic: Retrigger MSI interrupt on source configuration
Yonghong Song (1): selftests/bpf: Fix send_signal test with nested CONFIG_PARAVIRT
Yu Kuai (1): md/raid5: avoid BUG_ON() while continue reshape after reassembling
Zhang Rui (3):
      perf/x86/intel/cstate: Add Arrowlake support
      perf/x86/intel/cstate: Add Lunarlake support
      thermal: intel: hfi: Give HFI instances package scope
Zheng Zucheng (1): sched/cputime: Fix mul_u64_u64_div_u64() precision for cputime
Zhengchao Shao (1): net/smc: add the max value of fallback reason count
Zhenyu Wang (1): perf/x86/intel/cstate: Add pkg C2 residency counter for Sierra Forest
Zong-Zhe Yang (1): wifi: mac80211: fix NULL dereference at band check in starting tx ba session
diff --git a/Documentation/admin-guide/cifs/usage.rst b/Documentation/admin-guide/cifs/usage.rst
index fd4b56c0996f..c09674a75a9e 100644
--- a/Documentation/admin-guide/cifs/usage.rst
+++ b/Documentation/admin-guide/cifs/usage.rst
@@ -742,7 +742,7 @@ SecurityFlags		Flags which control security negotiation and
 	may use NTLMSSP			0x00080
 	must use NTLMSSP		0x80080
 	seal (packet encryption)	0x00040
-	must seal (not implemented yet)	0x40040
+	must seal			0x40040

 cifsFYI			If set to non-zero value, additional debug
 			information will be logged to the system error log. This field
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 2569e7f19b47..c82446cef8e2 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -4801,11 +4801,9 @@

 	profile=	[KNL] Enable kernel profiling via /proc/profile
 			Format: [<profiletype>,]<number>
-			Param: <profiletype>: "schedule", "sleep", or "kvm"
+			Param: <profiletype>: "schedule" or "kvm"
 				[defaults to kernel profiling]
 			Param: "schedule" - profile schedule points.
-			Param: "sleep" - profile D-state sleeping (millisecs).
-				Requires CONFIG_SCHEDSTATS
 			Param: "kvm" - profile VM exits.
 			Param: <number> - step/bucket size as a power of 2
 				for statistical time based profiling.
diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst
index eb8af8032c31..50327c05be8d 100644
--- a/Documentation/arch/arm64/silicon-errata.rst
+++ b/Documentation/arch/arm64/silicon-errata.rst
@@ -122,26 +122,50 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A76      | #1490853        | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A76      | #3324349        | ARM64_ERRATUM_3194386       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A77      | #1491015        | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A77      | #1508412        | ARM64_ERRATUM_1508412       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A77      | #3324348        | ARM64_ERRATUM_3194386       |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A78      | #3324344        | ARM64_ERRATUM_3194386       |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A78C     | #3324346,3324347| ARM64_ERRATUM_3194386       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A710     | #2119858        | ARM64_ERRATUM_2119858       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A710     | #2054223        | ARM64_ERRATUM_2054223       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A710     | #2224489        | ARM64_ERRATUM_2224489       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A710     | #3324338        | ARM64_ERRATUM_3194386       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A715     | #2645198        | ARM64_ERRATUM_2645198       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A720     | #3456091        | ARM64_ERRATUM_3194386       |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A725     | #3456106        | ARM64_ERRATUM_3194386       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-X1       | #1502854        | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-X1       | #3324344        | ARM64_ERRATUM_3194386       |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-X1C      | #3324346        | ARM64_ERRATUM_3194386       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-X2       | #2119858        | ARM64_ERRATUM_2119858       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-X2       | #2224489        | ARM64_ERRATUM_2224489       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-X2       | #3324338        | ARM64_ERRATUM_3194386       |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-X3       | #3324335        | ARM64_ERRATUM_3194386       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-X4       | #3194386        | ARM64_ERRATUM_3194386       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-X925     | #3324334        | ARM64_ERRATUM_3194386       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N1     | #1188873,1418040| ARM64_ERRATUM_1418040       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N1     | #1349291        | N/A                         |
@@ -150,15 +174,23 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N1     | #1542419        | ARM64_ERRATUM_1542419       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Neoverse-N1     | #3324349        | ARM64_ERRATUM_3194386       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N2     | #2139208        | ARM64_ERRATUM_2139208       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N2     | #2067961        | ARM64_ERRATUM_2067961       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N2     | #2253138        | ARM64_ERRATUM_2253138       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Neoverse-N2     | #3324339        | ARM64_ERRATUM_3194386       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-V1     | #1619801        | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
-| ARM            | Neoverse-V3     | #3312417        | ARM64_ERRATUM_3312417       |
+| ARM            | Neoverse-V1     | #3324341        | ARM64_ERRATUM_3194386       |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Neoverse-V2     | #3324336        | ARM64_ERRATUM_3194386       |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Neoverse-V3     | #3312417        | ARM64_ERRATUM_3194386       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | MMU-500         | #841119,826419  | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
diff --git a/Documentation/hwmon/corsair-psu.rst b/Documentation/hwmon/corsair-psu.rst
index 16db34d464dd..7ed794087f84 100644
--- a/Documentation/hwmon/corsair-psu.rst
+++ b/Documentation/hwmon/corsair-psu.rst
@@ -15,11 +15,11 @@ Supported devices:

   Corsair HX850i

-  Corsair HX1000i (Series 2022 and 2023)
+  Corsair HX1000i (Legacy and Series 2023)

-  Corsair HX1200i
+  Corsair HX1200i (Legacy and Series 2023)

-  Corsair HX1500i (Series 2022 and 2023)
+  Corsair HX1500i (Legacy and Series 2023)

   Corsair RM550i

diff --git a/Documentation/userspace-api/media/v4l/pixfmt-yuv-luma.rst b/Documentation/userspace-api/media/v4l/pixfmt-yuv-luma.rst
index b3c5779521d8..2e7d0d3151a1 100644
--- a/Documentation/userspace-api/media/v4l/pixfmt-yuv-luma.rst
+++ b/Documentation/userspace-api/media/v4l/pixfmt-yuv-luma.rst
@@ -21,9 +21,9 @@ are often referred to as greyscale formats.

 .. raw:: latex

-    \scriptsize
+    \tiny

-.. tabularcolumns:: |p{3.6cm}|p{3.0cm}|p{1.3cm}|p{2.6cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|
+.. tabularcolumns:: |p{3.6cm}|p{2.4cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|

 .. flat-table:: Luma-Only Image Formats
     :header-rows: 1
diff --git a/Makefile b/Makefile
index aec5cc0babf8..f9badb79ae8f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 10
-SUBLEVEL = 4
+SUBLEVEL = 5
 EXTRAVERSION =
 NAME = Baby Opossum Posse
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 5d91259ee7b5..11bbdc15c6e5 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1067,48 +1067,44 @@ config ARM64_ERRATUM_3117295

 	  If unsure, say Y.

-config ARM64_WORKAROUND_SPECULATIVE_SSBS
-	bool
-
 config ARM64_ERRATUM_3194386
-	bool "Cortex-X4: 3194386: workaround for MSR SSBS not self-synchronizing"
-	select ARM64_WORKAROUND_SPECULATIVE_SSBS
-	default y
-	help
-	  This option adds the workaround for ARM Cortex-X4 erratum 3194386.
+	bool "Cortex-*/Neoverse-*: workaround for MSR SSBS not self-synchronizing"
+	default y
+	help
+	  This option adds the workaround for the following errata:
+
+	  * ARM Cortex-A76 erratum 3324349
+	  * ARM Cortex-A77 erratum 3324348
+	  * ARM Cortex-A78 erratum 3324344
+	  * ARM Cortex-A78C erratum 3324346
+	  * ARM Cortex-A78C erratum 3324347
+	  * ARM Cortex-A710 erratam 3324338
+	  * ARM Cortex-A720 erratum 3456091
+	  * ARM Cortex-A725 erratum 3456106
+	  * ARM Cortex-X1 erratum 3324344
+	  * ARM Cortex-X1C erratum 3324346
+	  * ARM Cortex-X2 erratum 3324338
+	  * ARM Cortex-X3 erratum 3324335
+	  * ARM Cortex-X4 erratum 3194386
+	  * ARM Cortex-X925 erratum 3324334
+	  * ARM Neoverse-N1 erratum 3324349
+	  * ARM Neoverse N2 erratum 3324339
+	  * ARM Neoverse-V1 erratum 3324341
+	  * ARM Neoverse V2 erratum 3324336
+	  * ARM Neoverse-V3 erratum 3312417

 	  On affected cores "MSR SSBS, #0" instructions may not affect
 	  subsequent speculative instructions, which may permit unexepected
 	  speculative store bypassing.

-	  Work around this problem by placing a speculation barrier after
-	  kernel changes to SSBS. The presence of the SSBS special-purpose
-	  register is hidden from hwcaps and EL0 reads of ID_AA64PFR1_EL1, such
-	  that userspace will use the PR_SPEC_STORE_BYPASS prctl to change
-	  SSBS.
+	  Work around this problem by placing a Speculation Barrier (SB) or
+	  Instruction Synchronization Barrier (ISB) after kernel changes to
+	  SSBS. The presence of the SSBS special-purpose register is hidden
+	  from hwcaps and EL0 reads of ID_AA64PFR1_EL1, such that userspace
+	  will use the PR_SPEC_STORE_BYPASS prctl to change SSBS.

 	  If unsure, say Y.

-config ARM64_ERRATUM_3312417
-	bool "Neoverse-V3: 3312417: workaround for MSR SSBS not self-synchronizing"
-	select ARM64_WORKAROUND_SPECULATIVE_SSBS
-	default y
-	help
-	  This option adds the workaround for ARM Neoverse-V3 erratum 3312417.
-
-	  On affected cores "MSR SSBS, #0" instructions may not affect
-	  subsequent speculative instructions, which may permit unexepected
-	  speculative store bypassing.
-
-	  Work around this problem by placing a speculation barrier after
-	  kernel changes to SSBS. The presence of the SSBS special-purpose
-	  register is hidden from hwcaps and EL0 reads of ID_AA64PFR1_EL1, such
-	  that userspace will use the PR_SPEC_STORE_BYPASS prctl to change
-	  SSBS.
-
-	  If unsure, say Y.
-
-
 config CAVIUM_ERRATUM_22375
 	bool "Cavium erratum 22375, 24313"
 	default y
diff --git a/arch/arm64/boot/dts/ti/k3-am62-verdin-dahlia.dtsi b/arch/arm64/boot/dts/ti/k3-am62-verdin-dahlia.dtsi
index e8f4d136e5df..9202181fbd65 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-verdin-dahlia.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-verdin-dahlia.dtsi
@@ -43,15 +43,6 @@ simple-audio-card,cpu {
 			sound-dai = <&mcasp0>;
 		};
 	};
-
-	reg_usb_hub: regulator-usb-hub {
-		compatible = "regulator-fixed";
-		enable-active-high;
-		/* Verdin CTRL_SLEEP_MOCI# (SODIMM 256) */
-		gpio = <&main_gpio0 31 GPIO_ACTIVE_HIGH>;
-		regulator-boot-on;
-		regulator-name = "HUB_PWR_EN";
-	};
 };

 /* Verdin ETHs */
@@ -193,11 +184,6 @@ &ospi0 {
 	status = "okay";
 };

-/* Do not force CTRL_SLEEP_MOCI# always enabled */
-&reg_force_sleep_moci {
-	status = "disabled";
-};
-
 /* Verdin SD_1 */
 &sdhci1 {
 	status = "okay";
@@ -218,15 +204,7 @@ &usbss1 {
 };

 &usb1 {
-	#address-cells = <1>;
-	#size-cells = <0>;
 	status = "okay";
-
-	usb-hub@1 {
-		compatible = "usb424,2744";
-		reg = <1>;
-		vdd-supply = <&reg_usb_hub>;
-	};
 };

 /* Verdin CTRL_WAKE1_MICO# */
diff --git a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
index 359f53f3e019..5bef31b8577b 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
@@ -138,12 +138,6 @@ reg_1v8_eth: regulator-1v8-eth {
 		vin-supply = <&reg_1v8>;
 	};

-	/*
-	 * By default we enable CTRL_SLEEP_MOCI#, this is required to have
-	 * peripherals on the carrier board powered.
-	 * If more granularity or power saving is required this can be disabled
-	 * in the carrier board device tree files.
-	 */
 	reg_force_sleep_moci: regulator-force-sleep-moci {
 		compatible = "regulator-fixed";
 		enable-active-high;
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 7529c0263933..a6e5b07b64fd 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -59,7 +59,7 @@ cpucap_is_possible(const unsigned int cap)
 	case ARM64_WORKAROUND_REPEAT_TLBI:
 		return IS_ENABLED(CONFIG_ARM64_WORKAROUND_REPEAT_TLBI);
 	case ARM64_WORKAROUND_SPECULATIVE_SSBS:
-		return IS_ENABLED(CONFIG_ARM64_WORKAROUND_SPECULATIVE_SSBS);
+		return IS_ENABLED(CONFIG_ARM64_ERRATUM_3194386);
 	}

 	return true;
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 7b32b99023a2..5fd7caea4419 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -86,9 +86,14 @@
 #define ARM_CPU_PART_CORTEX_X2		0xD48
 #define ARM_CPU_PART_NEOVERSE_N2	0xD49
 #define ARM_CPU_PART_CORTEX_A78C	0xD4B
+#define ARM_CPU_PART_CORTEX_X1C		0xD4C
+#define ARM_CPU_PART_CORTEX_X3		0xD4E
 #define ARM_CPU_PART_NEOVERSE_V2	0xD4F
+#define ARM_CPU_PART_CORTEX_A720	0xD81
 #define ARM_CPU_PART_CORTEX_X4		0xD82
 #define ARM_CPU_PART_NEOVERSE_V3	0xD84
+#define ARM_CPU_PART_CORTEX_X925	0xD85
+#define ARM_CPU_PART_CORTEX_A725	0xD87

 #define APM_CPU_PART_XGENE		0x000
 #define APM_CPU_VAR_POTENZA		0x00
@@ -162,9 +167,14 @@
 #define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
 #define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
 #define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
+#define MIDR_CORTEX_X1C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1C)
+#define MIDR_CORTEX_X3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X3)
 #define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2)
+#define MIDR_CORTEX_A720 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720)
 #define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4)
 #define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
+#define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925)
+#define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725)
 #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
 #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 828be635e7e1..f6b6b4507357 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -432,14 +432,26 @@ static const struct midr_range erratum_spec_unpriv_load_list[] = {
 };
 #endif

-#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_SSBS
-static const struct midr_range erratum_spec_ssbs_list[] = {
 #ifdef CONFIG_ARM64_ERRATUM_3194386
+static const struct midr_range erratum_spec_ssbs_list[] = {
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A725),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
 	MIDR_ALL_VERSIONS(MIDR_CORTEX_X4),
-#endif
-#ifdef CONFIG_ARM64_ERRATUM_3312417
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_X925),
+	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
+	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
+	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
 	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
-#endif
 	{}
 };
 #endif
@@ -741,9 +753,9 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)),
 	},
 #endif
-#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_SSBS
+#ifdef CONFIG_ARM64_ERRATUM_3194386
 	{
-		.desc = "ARM errata 3194386, 3312417",
+		.desc = "SSBS not fully self-synchronizing",
 		.capability = ARM64_WORKAROUND_SPECULATIVE_SSBS,
 		ERRATA_MIDR_RANGE_LIST(erratum_spec_ssbs_list),
 	},
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
index baca47bd443c..da53722f95d4 100644
--- a/arch/arm64/kernel/proton-pack.c
+++ b/arch/arm64/kernel/proton-pack.c
@@ -567,7 +567,7 @@ static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
 	 * Mitigate this with an unconditional speculation barrier, as CPUs
 	 * could mis-speculate branches and bypass a conditional barrier.
 	 */
-	if (IS_ENABLED(CONFIG_ARM64_WORKAROUND_SPECULATIVE_SSBS))
+	if (IS_ENABLED(CONFIG_ARM64_ERRATUM_3194386))
 		spec_bar();

 	return SPECTRE_MITIGATED;
diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c
index 000825406c1f..2bf86aeda874 100644
--- a/arch/loongarch/kernel/efi.c
+++ b/arch/loongarch/kernel/efi.c
@@ -66,6 +66,12 @@ void __init efi_runtime_init(void)
 	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 }

+bool efi_poweroff_required(void)
+{
+	return efi_enabled(EFI_RUNTIME_SERVICES) &&
+	       (acpi_gbl_reduced_hardware || acpi_no_s5);
+}
+
 unsigned long __initdata screen_info_table = EFI_INVALID_TABLE_ADDR;

 #if defined(CONFIG_SYSFB) || defined(CONFIG_EFI_EARLYCON)
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 9656e956ed13..4ecfa08ac3e1 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -20,6 +20,7 @@ config PARISC
 	select ARCH_SUPPORTS_HUGETLBFS if PA20
 	select ARCH_SUPPORTS_MEMORY_FAILURE
 	select ARCH_STACKWALK
+	select ARCH_HAS_CACHE_LINE_SIZE
 	select ARCH_HAS_DEBUG_VM_PGTABLE
 	select HAVE_RELIABLE_STACKTRACE
 	select DMA_OPS
diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
index 2a60d7a72f1f..a3f0f100f219 100644
--- a/arch/parisc/include/asm/cache.h
+++ b/arch/parisc/include/asm/cache.h
@@ -20,7 +20,16 @@

 #define SMP_CACHE_BYTES L1_CACHE_BYTES

-#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
+#ifdef CONFIG_PA20
+#define ARCH_DMA_MINALIGN	128
+#else
+#define ARCH_DMA_MINALIGN	32
+#endif
+#define ARCH_KMALLOC_MINALIGN	16	/* ldcw requires 16-byte alignment */
+
+#define arch_slab_minalign()	((unsigned)dcache_stride)
+#define cache_line_size()	dcache_stride
+#define dma_get_cache_alignment cache_line_size

 #define __read_mostly __section(".data..read_mostly")
diff --git a/arch/parisc/net/bpf_jit_core.c b/arch/parisc/net/bpf_jit_core.c
index 979f45d4d1fb..06cbcd6fe87b 100644
--- a/arch/parisc/net/bpf_jit_core.c
+++ b/arch/parisc/net/bpf_jit_core.c
@@ -114,7 +114,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		jit_data->header =
 			bpf_jit_binary_alloc(prog_size + extable_size,
 					     &jit_data->image,
-					     sizeof(u32),
+					     sizeof(long),
 					     bpf_fill_ill_insns);
 		if (!jit_data->header) {
 			prog = orig_prog;
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index 1fc4ce44e743..920e3a640cad 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -432,8 +432,10 @@ static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
 	 * be removed on one CPU at a time AND PMU is disabled
 	 * when we come here
 	 */
-	for (i = 0; i < x86_pmu.num_counters; i++) {
-		if (cmpxchg(nb->owners + i, event, NULL) == event)
+	for_each_set_bit(i, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
+		struct perf_event *tmp = event;
+
+		if (try_cmpxchg(nb->owners + i, &tmp, NULL))
 			break;
 	}
 }
@@ -499,7 +501,7 @@ __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *ev
 	 * because of successive calls to x86_schedule_events() from
 	 * hw_perf_group_sched_in() without hw_perf_enable()
 	 */
-	for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
+	for_each_set_bit(idx, c->idxmsk, x86_pmu_max_num_counters(NULL)) {
 		if (new == -1 || hwc->idx == idx)
 			/* assign free slot, prefer hwc->idx */
 			old = cmpxchg(nb->owners + idx, NULL, event);
@@ -542,7 +544,7 @@ static struct amd_nb *amd_alloc_nb(int cpu)
 	/*
 	 * initialize all possible NB constraints
 	 */
-	for (i = 0; i < x86_pmu.num_counters; i++) {
+	for_each_set_bit(i, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
 		__set_bit(i, nb->event_constraints[i].idxmsk);
 		nb->event_constraints[i].weight = 1;
 	}
@@ -735,7 +737,7 @@ static void amd_pmu_check_overflow(void)
 	 * counters are always enabled when this function is called and
 	 * ARCH_PERFMON_EVENTSEL_INT is always set.
 	 */
-	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+	for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;

@@ -755,7 +757,7 @@ static void amd_pmu_enable_all(int added)

 	amd_brs_enable_all();

-	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+	for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
 		/* only activate events which are marked as active */
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
@@ -978,7 +980,7 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
 	/* Clear any reserved bits set by buggy microcode */
 	status &= amd_pmu_global_cntr_mask;

-	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+	for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;

@@ -1313,7 +1315,7 @@ static __initconst const struct x86_pmu amd_pmu = {
 	.addr_offset		= amd_pmu_addr_offset,
 	.event_map		= amd_pmu_event_map,
 	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
-	.num_counters		= AMD64_NUM_COUNTERS,
+	.cntr_mask64		= GENMASK_ULL(AMD64_NUM_COUNTERS - 1, 0),
 	.add			= amd_pmu_add_event,
 	.del			= amd_pmu_del_event,
 	.cntval_bits		= 48,
@@ -1412,7 +1414,7 @@ static int __init amd_core_pmu_init(void)
 	 */
 	x86_pmu.eventsel	= MSR_F15H_PERF_CTL;
 	x86_pmu.perfctr		= MSR_F15H_PERF_CTR;
-	x86_pmu.num_counters	= AMD64_NUM_COUNTERS_CORE;
+	x86_pmu.cntr_mask64	= GENMASK_ULL(AMD64_NUM_COUNTERS_CORE - 1, 0);

 	/* Check for Performance Monitoring v2 support */
 	if (boot_cpu_has(X86_FEATURE_PERFMON_V2)) {
@@ -1422,9 +1424,9 @@ static int __init amd_core_pmu_init(void)
 		x86_pmu.version = 2;

 		/* Find the number of available Core PMCs */
-		x86_pmu.num_counters = ebx.split.num_core_pmc;
+		x86_pmu.cntr_mask64 = GENMASK_ULL(ebx.split.num_core_pmc - 1, 0);

-		amd_pmu_global_cntr_mask = (1ULL << x86_pmu.num_counters) - 1;
+		amd_pmu_global_cntr_mask = x86_pmu.cntr_mask64;

 		/* Update PMC handling functions */
 		x86_pmu.enable_all = amd_pmu_v2_enable_all;
@@ -1452,12 +1454,12 @@ static int __init amd_core_pmu_init(void)
 	 * even numbered counter that has a consecutive adjacent odd
 	 * numbered counter following it.
 	 */
-	for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
+	for (i = 0; i < x86_pmu_max_num_counters(NULL) - 1; i += 2)
 		even_ctr_mask |= BIT_ULL(i);

 	pair_constraint = (struct event_constraint)
 			  __EVENT_CONSTRAINT(0, even_ctr_mask, 0,
-			  x86_pmu.num_counters / 2, 0,
+			  x86_pmu_max_num_counters(NULL) / 2, 0,
 			  PERF_X86_EVENT_PAIR);

 	x86_pmu.get_event_constraints = amd_get_event_constraints_f17h;
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index 5a4bfe9aea23..0bfde2ea5cb8 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -162,7 +162,9 @@ static int amd_uncore_add(struct perf_event *event, int flags)
 	/* if not, take the first available counter */
 	hwc->idx = -1;
 	for (i = 0; i < pmu->num_counters; i++) {
-		if (cmpxchg(&ctx->events[i], NULL, event) == NULL) {
+		struct perf_event *tmp = NULL;
+
+		if (try_cmpxchg(&ctx->events[i], &tmp, event)) {
 			hwc->idx = i;
 			break;
 		}
@@ -196,7 +198,9 @@ static void amd_uncore_del(struct perf_event *event, int flags)
 	event->pmu->stop(event, PERF_EF_UPDATE);

 	for (i = 0; i < pmu->num_counters; i++) {
-		if (cmpxchg(&ctx->events[i], event, NULL) == event)
+		struct perf_event *tmp = event;
+
+		if (try_cmpxchg(&ctx->events[i], &tmp, NULL))
 			break;
 	}

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index acd367c45334..83d12dd3f831 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -189,29 +189,31 @@ static DEFINE_MUTEX(pmc_reserve_mutex);
#ifdef CONFIG_X86_LOCAL_APIC
-static inline int get_possible_num_counters(void) +static inline u64 get_possible_counter_mask(void) { - int i, num_counters = x86_pmu.num_counters; + u64 cntr_mask = x86_pmu.cntr_mask64; + int i;
if (!is_hybrid()) - return num_counters; + return cntr_mask;
for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) - num_counters = max_t(int, num_counters, x86_pmu.hybrid_pmu[i].num_counters); + cntr_mask |= x86_pmu.hybrid_pmu[i].cntr_mask64;
- return num_counters; + return cntr_mask; }
static bool reserve_pmc_hardware(void) { - int i, num_counters = get_possible_num_counters(); + u64 cntr_mask = get_possible_counter_mask(); + int i, end;
- for (i = 0; i < num_counters; i++) { + for_each_set_bit(i, (unsigned long *)&cntr_mask, X86_PMC_IDX_MAX) { if (!reserve_perfctr_nmi(x86_pmu_event_addr(i))) goto perfctr_fail; }
- for (i = 0; i < num_counters; i++) { + for_each_set_bit(i, (unsigned long *)&cntr_mask, X86_PMC_IDX_MAX) { if (!reserve_evntsel_nmi(x86_pmu_config_addr(i))) goto eventsel_fail; } @@ -219,13 +221,14 @@ static bool reserve_pmc_hardware(void) return true;
eventsel_fail: - for (i--; i >= 0; i--) + end = i; + for_each_set_bit(i, (unsigned long *)&cntr_mask, end) release_evntsel_nmi(x86_pmu_config_addr(i)); - - i = num_counters; + i = X86_PMC_IDX_MAX;
perfctr_fail: - for (i--; i >= 0; i--) + end = i; + for_each_set_bit(i, (unsigned long *)&cntr_mask, end) release_perfctr_nmi(x86_pmu_event_addr(i));
return false; @@ -233,9 +236,10 @@ static bool reserve_pmc_hardware(void)
static void release_pmc_hardware(void) { - int i, num_counters = get_possible_num_counters(); + u64 cntr_mask = get_possible_counter_mask(); + int i;
- for (i = 0; i < num_counters; i++) { + for_each_set_bit(i, (unsigned long *)&cntr_mask, X86_PMC_IDX_MAX) { release_perfctr_nmi(x86_pmu_event_addr(i)); release_evntsel_nmi(x86_pmu_config_addr(i)); } @@ -248,7 +252,8 @@ static void release_pmc_hardware(void) {}
#endif
-bool check_hw_exists(struct pmu *pmu, int num_counters, int num_counters_fixed) +bool check_hw_exists(struct pmu *pmu, unsigned long *cntr_mask, + unsigned long *fixed_cntr_mask) { u64 val, val_fail = -1, val_new= ~0; int i, reg, reg_fail = -1, ret = 0; @@ -259,7 +264,7 @@ bool check_hw_exists(struct pmu *pmu, int num_counters, int num_counters_fixed) * Check to see if the BIOS enabled any of the counters, if so * complain and bail. */ - for (i = 0; i < num_counters; i++) { + for_each_set_bit(i, cntr_mask, X86_PMC_IDX_MAX) { reg = x86_pmu_config_addr(i); ret = rdmsrl_safe(reg, &val); if (ret) @@ -273,12 +278,12 @@ bool check_hw_exists(struct pmu *pmu, int num_counters, int num_counters_fixed) } }
- if (num_counters_fixed) { + if (*(u64 *)fixed_cntr_mask) { reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; ret = rdmsrl_safe(reg, &val); if (ret) goto msr_fail; - for (i = 0; i < num_counters_fixed; i++) { + for_each_set_bit(i, fixed_cntr_mask, X86_PMC_IDX_MAX) { if (fixed_counter_disabled(i, pmu)) continue; if (val & (0x03ULL << i*4)) { @@ -679,7 +684,7 @@ void x86_pmu_disable_all(void) struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); int idx;
- for (idx = 0; idx < x86_pmu.num_counters; idx++) { + for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) { struct hw_perf_event *hwc = &cpuc->events[idx]->hw; u64 val;
@@ -736,7 +741,7 @@ void x86_pmu_enable_all(int added) struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); int idx;
- for (idx = 0; idx < x86_pmu.num_counters; idx++) { + for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) { struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
if (!test_bit(idx, cpuc->active_mask)) @@ -975,7 +980,6 @@ EXPORT_SYMBOL_GPL(perf_assign_events);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) { - int num_counters = hybrid(cpuc->pmu, num_counters); struct event_constraint *c; struct perf_event *e; int n0, i, wmin, wmax, unsched = 0; @@ -1051,7 +1055,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
/* slow path */ if (i != n) { - int gpmax = num_counters; + int gpmax = x86_pmu_max_num_counters(cpuc->pmu);
/* * Do not allow scheduling of more than half the available @@ -1072,7 +1076,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) * the extra Merge events needed by large increment events. */ if (x86_pmu.flags & PMU_FL_PAIR) { - gpmax = num_counters - cpuc->n_pair; + gpmax -= cpuc->n_pair; WARN_ON(gpmax <= 0); }
@@ -1157,12 +1161,10 @@ static int collect_event(struct cpu_hw_events *cpuc, struct perf_event *event, */ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp) { - int num_counters = hybrid(cpuc->pmu, num_counters); - int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed); struct perf_event *event; int n, max_count;
- max_count = num_counters + num_counters_fixed; + max_count = x86_pmu_num_counters(cpuc->pmu) + x86_pmu_num_counters_fixed(cpuc->pmu);
/* current number of events already accepted */ n = cpuc->n_events; @@ -1519,19 +1521,22 @@ static void x86_pmu_start(struct perf_event *event, int flags) void perf_event_print_debug(void) { u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed; + unsigned long *cntr_mask, *fixed_cntr_mask; + struct event_constraint *pebs_constraints; + struct cpu_hw_events *cpuc; u64 pebs, debugctl; - int cpu = smp_processor_id(); - struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); - int num_counters = hybrid(cpuc->pmu, num_counters); - int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed); - struct event_constraint *pebs_constraints = hybrid(cpuc->pmu, pebs_constraints); - unsigned long flags; - int idx; + int cpu, idx;
- if (!num_counters) - return; + guard(irqsave)(); + + cpu = smp_processor_id(); + cpuc = &per_cpu(cpu_hw_events, cpu); + cntr_mask = hybrid(cpuc->pmu, cntr_mask); + fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask); + pebs_constraints = hybrid(cpuc->pmu, pebs_constraints);
- local_irq_save(flags); + if (!*(u64 *)cntr_mask) + return;
if (x86_pmu.version >= 2) { rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); @@ -1555,7 +1560,7 @@ void perf_event_print_debug(void) } pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
- for (idx = 0; idx < num_counters; idx++) { + for_each_set_bit(idx, cntr_mask, X86_PMC_IDX_MAX) { rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl); rdmsrl(x86_pmu_event_addr(idx), pmc_count);
@@ -1568,7 +1573,7 @@ void perf_event_print_debug(void) pr_info("CPU#%d: gen-PMC%d left: %016llx\n", cpu, idx, prev_left); } - for (idx = 0; idx < num_counters_fixed; idx++) { + for_each_set_bit(idx, fixed_cntr_mask, X86_PMC_IDX_MAX) { if (fixed_counter_disabled(idx, cpuc->pmu)) continue; rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count); @@ -1576,7 +1581,6 @@ void perf_event_print_debug(void) pr_info("CPU#%d: fixed-PMC%d count: %016llx\n", cpu, idx, pmc_count); } - local_irq_restore(flags); }
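perf_event_print_debug() above replaces the explicit local_irq_save()/local_irq_restore() pair with guard(irqsave)(), a scope-based helper from <linux/cleanup.h> that restores interrupt state on every exit path automatically. The helper is built on the compiler's cleanup attribute; a minimal user-space sketch of that underlying mechanism (names here are invented for illustration, not kernel API):

	#include <stdio.h>

	static void drop(int *lock)
	{
		printf("released (runs on every return path)\n");
		*lock = 0;
	}

	static int do_work(int fail)
	{
		int lock __attribute__((cleanup(drop))) = 1;

		printf("acquired\n");
		if (fail)
			return -1;	/* drop() still runs here */
		return 0;		/* ...and here */
	}

	int main(void)
	{
		do_work(1);
		do_work(0);
		return 0;
	}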
void x86_pmu_stop(struct perf_event *event, int flags) @@ -1682,7 +1686,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs) */ apic_write(APIC_LVTPC, APIC_DM_NMI);
- for (idx = 0; idx < x86_pmu.num_counters; idx++) { + for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) { if (!test_bit(idx, cpuc->active_mask)) continue;
@@ -2038,18 +2042,15 @@ static void _x86_pmu_read(struct perf_event *event) static_call(x86_pmu_update)(event); }
-void x86_pmu_show_pmu_cap(int num_counters, int num_counters_fixed, - u64 intel_ctrl) +void x86_pmu_show_pmu_cap(struct pmu *pmu) { pr_info("... version: %d\n", x86_pmu.version); pr_info("... bit width: %d\n", x86_pmu.cntval_bits); - pr_info("... generic registers: %d\n", num_counters); + pr_info("... generic registers: %d\n", x86_pmu_num_counters(pmu)); pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask); pr_info("... max period: %016Lx\n", x86_pmu.max_period); - pr_info("... fixed-purpose events: %lu\n", - hweight64((((1ULL << num_counters_fixed) - 1) - << INTEL_PMC_IDX_FIXED) & intel_ctrl)); - pr_info("... event mask: %016Lx\n", intel_ctrl); + pr_info("... fixed-purpose events: %d\n", x86_pmu_num_counters_fixed(pmu)); + pr_info("... event mask: %016Lx\n", hybrid(pmu, intel_ctrl)); }
static int __init init_hw_perf_events(void) @@ -2086,7 +2087,7 @@ static int __init init_hw_perf_events(void) pmu_check_apic();
/* sanity check that the hardware exists or is emulated */ - if (!check_hw_exists(&pmu, x86_pmu.num_counters, x86_pmu.num_counters_fixed)) + if (!check_hw_exists(&pmu, x86_pmu.cntr_mask, x86_pmu.fixed_cntr_mask)) goto out_bad_pmu;
pr_cont("%s PMU driver.\n", x86_pmu.name); @@ -2097,14 +2098,14 @@ static int __init init_hw_perf_events(void) quirk->func();
if (!x86_pmu.intel_ctrl) - x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1; + x86_pmu.intel_ctrl = x86_pmu.cntr_mask64;
perf_events_lapic_init(); register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
unconstrained = (struct event_constraint) - __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1, - 0, x86_pmu.num_counters, 0, 0); + __EVENT_CONSTRAINT(0, x86_pmu.cntr_mask64, + 0, x86_pmu_num_counters(NULL), 0, 0);
x86_pmu_format_group.attrs = x86_pmu.format_attrs;
@@ -2113,11 +2114,8 @@ static int __init init_hw_perf_events(void)
pmu.attr_update = x86_pmu.attr_update;
- if (!is_hybrid()) { - x86_pmu_show_pmu_cap(x86_pmu.num_counters, - x86_pmu.num_counters_fixed, - x86_pmu.intel_ctrl); - } + if (!is_hybrid()) + x86_pmu_show_pmu_cap(NULL);
if (!x86_pmu.read) x86_pmu.read = _x86_pmu_read; @@ -2481,7 +2479,7 @@ void perf_clear_dirty_counters(void) for_each_set_bit(i, cpuc->dirty, X86_PMC_IDX_MAX) { if (i >= INTEL_PMC_IDX_FIXED) { /* Metrics and fake events don't have corresponding HW counters. */ - if ((i - INTEL_PMC_IDX_FIXED) >= hybrid(cpuc->pmu, num_counters_fixed)) + if (!test_bit(i - INTEL_PMC_IDX_FIXED, hybrid(cpuc->pmu, fixed_cntr_mask))) continue;
wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + (i - INTEL_PMC_IDX_FIXED), 0); @@ -2986,8 +2984,8 @@ void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap) * base PMU holds the correct number of counters for P-cores. */ cap->version = x86_pmu.version; - cap->num_counters_gp = x86_pmu.num_counters; - cap->num_counters_fixed = x86_pmu.num_counters_fixed; + cap->num_counters_gp = x86_pmu_num_counters(NULL); + cap->num_counters_fixed = x86_pmu_num_counters_fixed(NULL); cap->bit_width_gp = x86_pmu.cntval_bits; cap->bit_width_fixed = x86_pmu.cntval_bits; cap->events_mask = (unsigned int)x86_pmu.events_maskl; diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 101a21fe9c21..f25205d047e8 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2874,23 +2874,23 @@ static void intel_pmu_reset(void) { struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds); struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); - int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed); - int num_counters = hybrid(cpuc->pmu, num_counters); + unsigned long *cntr_mask = hybrid(cpuc->pmu, cntr_mask); + unsigned long *fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask); unsigned long flags; int idx;
- if (!num_counters) + if (!*(u64 *)cntr_mask) return;
local_irq_save(flags);
pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
- for (idx = 0; idx < num_counters; idx++) { + for_each_set_bit(idx, cntr_mask, INTEL_PMC_MAX_GENERIC) { wrmsrl_safe(x86_pmu_config_addr(idx), 0ull); wrmsrl_safe(x86_pmu_event_addr(idx), 0ull); } - for (idx = 0; idx < num_counters_fixed; idx++) { + for_each_set_bit(idx, fixed_cntr_mask, INTEL_PMC_MAX_FIXED) { if (fixed_counter_disabled(idx, cpuc->pmu)) continue; wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); @@ -2940,8 +2940,7 @@ static void x86_pmu_handle_guest_pebs(struct pt_regs *regs, !guest_pebs_idxs) return;
- for_each_set_bit(bit, (unsigned long *)&guest_pebs_idxs, - INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed) { + for_each_set_bit(bit, (unsigned long *)&guest_pebs_idxs, X86_PMC_IDX_MAX) { event = cpuc->events[bit]; if (!event->attr.precise_ip) continue; @@ -4199,7 +4198,7 @@ static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr, void *data) struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; int idx;
- for (idx = 0; idx < x86_pmu.num_counters; idx++) { + for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) { struct perf_event *event = cpuc->events[idx];
arr[idx].msr = x86_pmu_config_addr(idx); @@ -4217,7 +4216,7 @@ static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr, void *data) arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE; }
- *nr = x86_pmu.num_counters; + *nr = x86_pmu_max_num_counters(cpuc->pmu); return arr; }
@@ -4232,7 +4231,7 @@ static void core_pmu_enable_all(int added) struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); int idx;
- for (idx = 0; idx < x86_pmu.num_counters; idx++) { + for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) { struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
if (!test_bit(idx, cpuc->active_mask) || @@ -4684,13 +4683,33 @@ static void flip_smm_bit(void *data) } }
-static void intel_pmu_check_num_counters(int *num_counters, - int *num_counters_fixed, - u64 *intel_ctrl, u64 fixed_mask); +static void intel_pmu_check_counters_mask(u64 *cntr_mask, + u64 *fixed_cntr_mask, + u64 *intel_ctrl) +{ + unsigned int bit; + + bit = fls64(*cntr_mask); + if (bit > INTEL_PMC_MAX_GENERIC) { + WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!", + bit, INTEL_PMC_MAX_GENERIC); + *cntr_mask &= GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0); + } + *intel_ctrl = *cntr_mask; + + bit = fls64(*fixed_cntr_mask); + if (bit > INTEL_PMC_MAX_FIXED) { + WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!", + bit, INTEL_PMC_MAX_FIXED); + *fixed_cntr_mask &= GENMASK_ULL(INTEL_PMC_MAX_FIXED - 1, 0); + } + + *intel_ctrl |= *fixed_cntr_mask << INTEL_PMC_IDX_FIXED; +}
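Note how the new intel_pmu_check_counters_mask() clips by the highest set bit (fls64()) rather than by a plain count, so sparse masks survive the sanity check intact. The arithmetic, open-coded as a user-space sketch (GENMASK_ULL() and fls64() re-derived here; the kernel's definitions live in <linux/bits.h> and the arch bitops):

	#include <stdint.h>
	#include <stdio.h>

	#define GENMASK_ULL(h, l) \
		(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

	static int fls64(uint64_t x)
	{
		return x ? 64 - __builtin_clzll(x) : 0;
	}

	int main(void)
	{
		uint64_t cntr_mask = 0x1ff;	/* bits 0..8: one too many */
		int max = 8;			/* stand-in for INTEL_PMC_MAX_GENERIC */

		if (fls64(cntr_mask) > max)
			cntr_mask &= GENMASK_ULL(max - 1, 0);

		printf("clipped mask: %#llx\n",	/* 0xff */
		       (unsigned long long)cntr_mask);
		return 0;
	}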
static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints, - int num_counters, - int num_counters_fixed, + u64 cntr_mask, + u64 fixed_cntr_mask, u64 intel_ctrl);
static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs); @@ -4713,11 +4732,10 @@ static void update_pmu_cap(struct x86_hybrid_pmu *pmu) if (sub_bitmaps & ARCH_PERFMON_NUM_COUNTER_LEAF_BIT) { cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_NUM_COUNTER_LEAF, &eax, &ebx, &ecx, &edx); - pmu->num_counters = fls(eax); - pmu->num_counters_fixed = fls(ebx); + pmu->cntr_mask64 = eax; + pmu->fixed_cntr_mask64 = ebx; }
- if (!intel_pmu_broken_perf_cap()) { /* Perf Metric (Bit 15) and PEBS via PT (Bit 16) are hybrid enumeration */ rdmsrl(MSR_IA32_PERF_CAPABILITIES, pmu->intel_cap.capabilities); @@ -4726,12 +4744,12 @@ static void update_pmu_cap(struct x86_hybrid_pmu *pmu)
static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu) { - intel_pmu_check_num_counters(&pmu->num_counters, &pmu->num_counters_fixed, - &pmu->intel_ctrl, (1ULL << pmu->num_counters_fixed) - 1); - pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters); + intel_pmu_check_counters_mask(&pmu->cntr_mask64, &pmu->fixed_cntr_mask64, + &pmu->intel_ctrl); + pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64); pmu->unconstrained = (struct event_constraint) - __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1, - 0, pmu->num_counters, 0, 0); + __EVENT_CONSTRAINT(0, pmu->cntr_mask64, + 0, x86_pmu_num_counters(&pmu->pmu), 0, 0);
if (pmu->intel_cap.perf_metrics) pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS; @@ -4744,8 +4762,8 @@ static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu) pmu->pmu.capabilities &= ~PERF_PMU_CAP_AUX_OUTPUT;
intel_pmu_check_event_constraints(pmu->event_constraints, - pmu->num_counters, - pmu->num_counters_fixed, + pmu->cntr_mask64, + pmu->fixed_cntr_mask64, pmu->intel_ctrl);
intel_pmu_check_extra_regs(pmu->extra_regs); @@ -4806,7 +4824,7 @@ static bool init_hybrid_pmu(int cpu)
intel_pmu_check_hybrid_pmus(pmu);
- if (!check_hw_exists(&pmu->pmu, pmu->num_counters, pmu->num_counters_fixed)) + if (!check_hw_exists(&pmu->pmu, pmu->cntr_mask, pmu->fixed_cntr_mask)) return false;
pr_info("%s PMU driver: ", pmu->name); @@ -4816,8 +4834,7 @@ static bool init_hybrid_pmu(int cpu)
pr_cont("\n");
- x86_pmu_show_pmu_cap(pmu->num_counters, pmu->num_counters_fixed, - pmu->intel_ctrl); + x86_pmu_show_pmu_cap(&pmu->pmu);
end: cpumask_set_cpu(cpu, &pmu->supported_cpus); @@ -5955,29 +5972,9 @@ static const struct attribute_group *hybrid_attr_update[] = {
static struct attribute *empty_attrs;
-static void intel_pmu_check_num_counters(int *num_counters, - int *num_counters_fixed, - u64 *intel_ctrl, u64 fixed_mask) -{ - if (*num_counters > INTEL_PMC_MAX_GENERIC) { - WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!", - *num_counters, INTEL_PMC_MAX_GENERIC); - *num_counters = INTEL_PMC_MAX_GENERIC; - } - *intel_ctrl = (1ULL << *num_counters) - 1; - - if (*num_counters_fixed > INTEL_PMC_MAX_FIXED) { - WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!", - *num_counters_fixed, INTEL_PMC_MAX_FIXED); - *num_counters_fixed = INTEL_PMC_MAX_FIXED; - } - - *intel_ctrl |= fixed_mask << INTEL_PMC_IDX_FIXED; -} - static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints, - int num_counters, - int num_counters_fixed, + u64 cntr_mask, + u64 fixed_cntr_mask, u64 intel_ctrl) { struct event_constraint *c; @@ -6014,10 +6011,9 @@ static void intel_pmu_check_event_constraints(struct event_constraint *event_con * generic counters */ if (!use_fixed_pseudo_encoding(c->code)) - c->idxmsk64 |= (1ULL << num_counters) - 1; + c->idxmsk64 |= cntr_mask; } - c->idxmsk64 &= - ~(~0ULL << (INTEL_PMC_IDX_FIXED + num_counters_fixed)); + c->idxmsk64 &= cntr_mask | (fixed_cntr_mask << INTEL_PMC_IDX_FIXED); c->weight = hweight64(c->idxmsk64); } } @@ -6068,12 +6064,12 @@ static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus) pmu->pmu_type = intel_hybrid_pmu_type_map[bit].id; pmu->name = intel_hybrid_pmu_type_map[bit].name;
- pmu->num_counters = x86_pmu.num_counters; - pmu->num_counters_fixed = x86_pmu.num_counters_fixed; - pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters); + pmu->cntr_mask64 = x86_pmu.cntr_mask64; + pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64; + pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64); pmu->unconstrained = (struct event_constraint) - __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1, - 0, pmu->num_counters, 0, 0); + __EVENT_CONSTRAINT(0, pmu->cntr_mask64, + 0, x86_pmu_num_counters(&pmu->pmu), 0, 0);
pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities; if (pmu->pmu_type & hybrid_small) { @@ -6186,14 +6182,14 @@ __init int intel_pmu_init(void) x86_pmu = intel_pmu;
x86_pmu.version = version; - x86_pmu.num_counters = eax.split.num_counters; + x86_pmu.cntr_mask64 = GENMASK_ULL(eax.split.num_counters - 1, 0); x86_pmu.cntval_bits = eax.split.bit_width; x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
x86_pmu.events_maskl = ebx.full; x86_pmu.events_mask_len = eax.split.mask_length;
- x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters); + x86_pmu.pebs_events_mask = intel_pmu_pebs_mask(x86_pmu.cntr_mask64); x86_pmu.pebs_capable = PEBS_COUNTER_MASK;
/* @@ -6203,12 +6199,10 @@ __init int intel_pmu_init(void) if (version > 1 && version < 5) { int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
- x86_pmu.num_counters_fixed = - max((int)edx.split.num_counters_fixed, assume); - - fixed_mask = (1L << x86_pmu.num_counters_fixed) - 1; + x86_pmu.fixed_cntr_mask64 = + GENMASK_ULL(max((int)edx.split.num_counters_fixed, assume) - 1, 0); } else if (version >= 5) - x86_pmu.num_counters_fixed = fls(fixed_mask); + x86_pmu.fixed_cntr_mask64 = fixed_mask;
if (boot_cpu_has(X86_FEATURE_PDCM)) { u64 capabilities; @@ -6807,11 +6801,13 @@ __init int intel_pmu_init(void) pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX]; intel_pmu_init_glc(&pmu->pmu); if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) { - pmu->num_counters = x86_pmu.num_counters + 2; - pmu->num_counters_fixed = x86_pmu.num_counters_fixed + 1; + pmu->cntr_mask64 <<= 2; + pmu->cntr_mask64 |= 0x3; + pmu->fixed_cntr_mask64 <<= 1; + pmu->fixed_cntr_mask64 |= 0x1; } else { - pmu->num_counters = x86_pmu.num_counters; - pmu->num_counters_fixed = x86_pmu.num_counters_fixed; + pmu->cntr_mask64 = x86_pmu.cntr_mask64; + pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64; }
/* @@ -6821,15 +6817,16 @@ __init int intel_pmu_init(void) * mistakenly add extra counters for P-cores. Correct the number of * counters here. */ - if ((pmu->num_counters > 8) || (pmu->num_counters_fixed > 4)) { - pmu->num_counters = x86_pmu.num_counters; - pmu->num_counters_fixed = x86_pmu.num_counters_fixed; + if ((x86_pmu_num_counters(&pmu->pmu) > 8) || (x86_pmu_num_counters_fixed(&pmu->pmu) > 4)) { + pmu->cntr_mask64 = x86_pmu.cntr_mask64; + pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64; }
- pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters); + pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64); pmu->unconstrained = (struct event_constraint) - __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1, - 0, pmu->num_counters, 0, 0); + __EVENT_CONSTRAINT(0, pmu->cntr_mask64, + 0, x86_pmu_num_counters(&pmu->pmu), 0, 0); + pmu->extra_regs = intel_glc_extra_regs;
/* Initialize Atom core specific PerfMon capabilities.*/ @@ -6896,9 +6893,9 @@ __init int intel_pmu_init(void) * The constraints may be cut according to the CPUID enumeration * by inserting the EVENT_CONSTRAINT_END. */ - if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) - x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED; - intel_v5_gen_event_constraints[x86_pmu.num_counters_fixed].weight = -1; + if (fls64(x86_pmu.fixed_cntr_mask64) > INTEL_PMC_MAX_FIXED) + x86_pmu.fixed_cntr_mask64 &= GENMASK_ULL(INTEL_PMC_MAX_FIXED - 1, 0); + intel_v5_gen_event_constraints[fls64(x86_pmu.fixed_cntr_mask64)].weight = -1; x86_pmu.event_constraints = intel_v5_gen_event_constraints; pr_cont("generic architected perfmon, "); name = "generic_arch_v5+"; @@ -6925,18 +6922,17 @@ __init int intel_pmu_init(void) x86_pmu.attr_update = hybrid_attr_update; }
- intel_pmu_check_num_counters(&x86_pmu.num_counters, - &x86_pmu.num_counters_fixed, - &x86_pmu.intel_ctrl, - (u64)fixed_mask); + intel_pmu_check_counters_mask(&x86_pmu.cntr_mask64, + &x86_pmu.fixed_cntr_mask64, + &x86_pmu.intel_ctrl);
/* AnyThread may be deprecated on arch perfmon v5 or later */ if (x86_pmu.intel_cap.anythread_deprecated) x86_pmu.format_attrs = intel_arch_formats_attr;
intel_pmu_check_event_constraints(x86_pmu.event_constraints, - x86_pmu.num_counters, - x86_pmu.num_counters_fixed, + x86_pmu.cntr_mask64, + x86_pmu.fixed_cntr_mask64, x86_pmu.intel_ctrl); /* * Access LBR MSR may cause #GP under certain circumstances. diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index dd1832055891..9f116dfc4728 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -41,7 +41,7 @@ * MSR_CORE_C1_RES: CORE C1 Residency Counter * perf code: 0x00 * Available model: SLM,AMT,GLM,CNL,ICX,TNT,ADL,RPL - * MTL,SRF,GRR + * MTL,SRF,GRR,ARL,LNL * Scope: Core (each processor core has a MSR) * MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter * perf code: 0x01 @@ -53,30 +53,31 @@ * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW, * SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX, * TGL,TNT,RKL,ADL,RPL,SPR,MTL,SRF, - * GRR + * GRR,ARL,LNL * Scope: Core * MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter * perf code: 0x03 * Available model: SNB,IVB,HSW,BDW,SKL,CNL,KBL,CML, - * ICL,TGL,RKL,ADL,RPL,MTL + * ICL,TGL,RKL,ADL,RPL,MTL,ARL,LNL * Scope: Core * MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter. * perf code: 0x00 * Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL, * KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL, - * RPL,SPR,MTL + * RPL,SPR,MTL,ARL,LNL,SRF * Scope: Package (physical package) * MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter. * perf code: 0x01 * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL, * GLM,CNL,KBL,CML,ICL,TGL,TNT,RKL, - * ADL,RPL,MTL + * ADL,RPL,MTL,ARL,LNL * Scope: Package (physical package) * MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter. * perf code: 0x02 * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW, * SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX, - * TGL,TNT,RKL,ADL,RPL,SPR,MTL,SRF + * TGL,TNT,RKL,ADL,RPL,SPR,MTL,SRF, + * ARL,LNL * Scope: Package (physical package) * MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter. * perf code: 0x03 @@ -86,7 +87,7 @@ * MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter. * perf code: 0x04 * Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL, - * ADL,RPL,MTL + * ADL,RPL,MTL,ARL * Scope: Package (physical package) * MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter. * perf code: 0x05 @@ -95,7 +96,7 @@ * MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter. * perf code: 0x06 * Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL, - * TNT,RKL,ADL,RPL,MTL + * TNT,RKL,ADL,RPL,MTL,ARL,LNL * Scope: Package (physical package) * MSR_MODULE_C6_RES_MS: Module C6 Residency Counter. * perf code: 0x00 @@ -640,6 +641,17 @@ static const struct cstate_model adl_cstates __initconst = { BIT(PERF_CSTATE_PKG_C10_RES), };
+static const struct cstate_model lnl_cstates __initconst = { + .core_events = BIT(PERF_CSTATE_CORE_C1_RES) | + BIT(PERF_CSTATE_CORE_C6_RES) | + BIT(PERF_CSTATE_CORE_C7_RES), + + .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) | + BIT(PERF_CSTATE_PKG_C3_RES) | + BIT(PERF_CSTATE_PKG_C6_RES) | + BIT(PERF_CSTATE_PKG_C10_RES), +}; + static const struct cstate_model slm_cstates __initconst = { .core_events = BIT(PERF_CSTATE_CORE_C1_RES) | BIT(PERF_CSTATE_CORE_C6_RES), @@ -681,7 +693,8 @@ static const struct cstate_model srf_cstates __initconst = { .core_events = BIT(PERF_CSTATE_CORE_C1_RES) | BIT(PERF_CSTATE_CORE_C6_RES),
- .pkg_events = BIT(PERF_CSTATE_PKG_C6_RES), + .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) | + BIT(PERF_CSTATE_PKG_C6_RES),
.module_events = BIT(PERF_CSTATE_MODULE_C6_RES), }; @@ -760,6 +773,10 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_MATCH_VFM(INTEL_RAPTORLAKE_S, &adl_cstates), X86_MATCH_VFM(INTEL_METEORLAKE, &adl_cstates), X86_MATCH_VFM(INTEL_METEORLAKE_L, &adl_cstates), + X86_MATCH_VFM(INTEL_ARROWLAKE, &adl_cstates), + X86_MATCH_VFM(INTEL_ARROWLAKE_H, &adl_cstates), + X86_MATCH_VFM(INTEL_ARROWLAKE_U, &adl_cstates), + X86_MATCH_VFM(INTEL_LUNARLAKE_M, &lnl_cstates), { }, }; MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match); diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 80a4f712217b..9212053f6f1d 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1137,8 +1137,7 @@ void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sche static inline void pebs_update_threshold(struct cpu_hw_events *cpuc) { struct debug_store *ds = cpuc->ds; - int max_pebs_events = hybrid(cpuc->pmu, max_pebs_events); - int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed); + int max_pebs_events = intel_pmu_max_num_pebs(cpuc->pmu); u64 threshold; int reserved;
@@ -1146,7 +1145,7 @@ static inline void pebs_update_threshold(struct cpu_hw_events *cpuc) return;
if (x86_pmu.flags & PMU_FL_PEBS_ALL) - reserved = max_pebs_events + num_counters_fixed; + reserved = max_pebs_events + x86_pmu_max_num_counters_fixed(cpuc->pmu); else reserved = max_pebs_events;
@@ -2161,6 +2160,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d void *base, *at, *top; short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {}; short error[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {}; + int max_pebs_events = intel_pmu_max_num_pebs(NULL); int bit, i, size; u64 mask;
@@ -2172,11 +2172,11 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d
ds->pebs_index = ds->pebs_buffer_base;
- mask = (1ULL << x86_pmu.max_pebs_events) - 1; - size = x86_pmu.max_pebs_events; + mask = x86_pmu.pebs_events_mask; + size = max_pebs_events; if (x86_pmu.flags & PMU_FL_PEBS_ALL) { - mask |= ((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED; - size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed; + mask |= x86_pmu.fixed_cntr_mask64 << INTEL_PMC_IDX_FIXED; + size = INTEL_PMC_IDX_FIXED + x86_pmu_max_num_counters_fixed(NULL); }
if (unlikely(base >= top)) { @@ -2212,8 +2212,9 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d pebs_status = p->status = cpuc->pebs_enabled;
bit = find_first_bit((unsigned long *)&pebs_status, - x86_pmu.max_pebs_events); - if (bit >= x86_pmu.max_pebs_events) + max_pebs_events); + + if (!(x86_pmu.pebs_events_mask & (1 << bit))) continue;
/* @@ -2271,12 +2272,10 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d { short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {}; struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); - int max_pebs_events = hybrid(cpuc->pmu, max_pebs_events); - int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed); struct debug_store *ds = cpuc->ds; struct perf_event *event; void *base, *at, *top; - int bit, size; + int bit; u64 mask;
if (!x86_pmu.pebs_active) @@ -2287,12 +2286,11 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d
ds->pebs_index = ds->pebs_buffer_base;
- mask = ((1ULL << max_pebs_events) - 1) | - (((1ULL << num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED); - size = INTEL_PMC_IDX_FIXED + num_counters_fixed; + mask = hybrid(cpuc->pmu, pebs_events_mask) | + (hybrid(cpuc->pmu, fixed_cntr_mask64) << INTEL_PMC_IDX_FIXED);
if (unlikely(base >= top)) { - intel_pmu_pebs_event_update_no_drain(cpuc, size); + intel_pmu_pebs_event_update_no_drain(cpuc, X86_PMC_IDX_MAX); return; }
@@ -2302,11 +2300,11 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d pebs_status = get_pebs_status(at) & cpuc->pebs_enabled; pebs_status &= mask;
- for_each_set_bit(bit, (unsigned long *)&pebs_status, size) + for_each_set_bit(bit, (unsigned long *)&pebs_status, X86_PMC_IDX_MAX) counts[bit]++; }
- for_each_set_bit(bit, (unsigned long *)&mask, size) { + for_each_set_bit(bit, (unsigned long *)&mask, X86_PMC_IDX_MAX) { if (counts[bit] == 0) continue;
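In both drain_pebs paths above, a single 64-bit word now carries the whole PEBS index space: generic counters in the low bits and fixed counters shifted up by INTEL_PMC_IDX_FIXED (32 on x86), so one bit scan replaces the old per-type size bookkeeping. A sketch of the composition (the concrete mask values are assumptions for illustration):

	#include <stdint.h>
	#include <stdio.h>

	#define IDX_FIXED 32	/* stand-in for INTEL_PMC_IDX_FIXED */

	int main(void)
	{
		uint64_t pebs_events_mask = 0xff;	/* 8 generic PEBS counters */
		uint64_t fixed_cntr_mask  = 0x7;	/* 3 fixed counters */
		uint64_t mask = pebs_events_mask |
				(fixed_cntr_mask << IDX_FIXED);

		/* Generic counters occupy bits 0..31, fixed ones bit 32
		 * up, so one loop over set bits covers both kinds. */
		printf("combined mask: %#llx\n", (unsigned long long)mask);
		return 0;
	}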
diff --git a/arch/x86/events/intel/knc.c b/arch/x86/events/intel/knc.c index 618001c208e8..034a1f6a457c 100644 --- a/arch/x86/events/intel/knc.c +++ b/arch/x86/events/intel/knc.c @@ -303,7 +303,7 @@ static const struct x86_pmu knc_pmu __initconst = { .apic = 1, .max_period = (1ULL << 39) - 1, .version = 0, - .num_counters = 2, + .cntr_mask64 = 0x3, .cntval_bits = 40, .cntval_mask = (1ULL << 40) - 1, .get_event_constraints = x86_get_event_constraints, diff --git a/arch/x86/events/intel/p4.c b/arch/x86/events/intel/p4.c index 35936188db01..844bc4fc4724 100644 --- a/arch/x86/events/intel/p4.c +++ b/arch/x86/events/intel/p4.c @@ -919,7 +919,7 @@ static void p4_pmu_disable_all(void) struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); int idx;
- for (idx = 0; idx < x86_pmu.num_counters; idx++) { + for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) { struct perf_event *event = cpuc->events[idx]; if (!test_bit(idx, cpuc->active_mask)) continue; @@ -998,7 +998,7 @@ static void p4_pmu_enable_all(int added) struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); int idx;
- for (idx = 0; idx < x86_pmu.num_counters; idx++) { + for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) { struct perf_event *event = cpuc->events[idx]; if (!test_bit(idx, cpuc->active_mask)) continue; @@ -1040,7 +1040,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
cpuc = this_cpu_ptr(&cpu_hw_events);
- for (idx = 0; idx < x86_pmu.num_counters; idx++) { + for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) { int overflow;
if (!test_bit(idx, cpuc->active_mask)) { @@ -1353,7 +1353,7 @@ static __initconst const struct x86_pmu p4_pmu = { * though leave it restricted at moment assuming * HT is on */ - .num_counters = ARCH_P4_MAX_CCCR, + .cntr_mask64 = GENMASK_ULL(ARCH_P4_MAX_CCCR - 1, 0), .apic = 1, .cntval_bits = ARCH_P4_CNTRVAL_BITS, .cntval_mask = ARCH_P4_CNTRVAL_MASK, @@ -1395,7 +1395,7 @@ __init int p4_pmu_init(void) * * Solve this by zero'ing out the registers to mimic a reset. */ - for (i = 0; i < x86_pmu.num_counters; i++) { + for_each_set_bit(i, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) { reg = x86_pmu_config_addr(i); wrmsrl_safe(reg, 0ULL); } diff --git a/arch/x86/events/intel/p6.c b/arch/x86/events/intel/p6.c index 408879b0c0d4..a6cffb4f4ef5 100644 --- a/arch/x86/events/intel/p6.c +++ b/arch/x86/events/intel/p6.c @@ -214,7 +214,7 @@ static __initconst const struct x86_pmu p6_pmu = { .apic = 1, .max_period = (1ULL << 31) - 1, .version = 0, - .num_counters = 2, + .cntr_mask64 = 0x3, /* * Events have 40 bits implemented. However they are designed such * that bits [32-39] are sign extensions of bit 31. As such the diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 72b022a1e16c..745c174fc880 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -684,9 +684,15 @@ struct x86_hybrid_pmu { cpumask_t supported_cpus; union perf_capabilities intel_cap; u64 intel_ctrl; - int max_pebs_events; - int num_counters; - int num_counters_fixed; + u64 pebs_events_mask; + union { + u64 cntr_mask64; + unsigned long cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; + }; + union { + u64 fixed_cntr_mask64; + unsigned long fixed_cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; + }; struct event_constraint unconstrained;
u64 hw_cache_event_ids @@ -774,8 +780,14 @@ struct x86_pmu { int (*rdpmc_index)(int index); u64 (*event_map)(int); int max_events; - int num_counters; - int num_counters_fixed; + union { + u64 cntr_mask64; + unsigned long cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; + }; + union { + u64 fixed_cntr_mask64; + unsigned long fixed_cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; + }; int cntval_bits; u64 cntval_mask; union { @@ -852,7 +864,7 @@ struct x86_pmu { pebs_ept :1; int pebs_record_size; int pebs_buffer_size; - int max_pebs_events; + u64 pebs_events_mask; void (*drain_pebs)(struct pt_regs *regs, struct perf_sample_data *data); struct event_constraint *pebs_constraints; void (*pebs_aliases)(struct perf_event *event); @@ -1125,8 +1137,8 @@ static inline int x86_pmu_rdpmc_index(int index) return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index; }
-bool check_hw_exists(struct pmu *pmu, int num_counters, - int num_counters_fixed); +bool check_hw_exists(struct pmu *pmu, unsigned long *cntr_mask, + unsigned long *fixed_cntr_mask);
int x86_add_exclusive(unsigned int what);
@@ -1197,8 +1209,27 @@ void x86_pmu_enable_event(struct perf_event *event);
int x86_pmu_handle_irq(struct pt_regs *regs);
-void x86_pmu_show_pmu_cap(int num_counters, int num_counters_fixed, - u64 intel_ctrl); +void x86_pmu_show_pmu_cap(struct pmu *pmu); + +static inline int x86_pmu_num_counters(struct pmu *pmu) +{ + return hweight64(hybrid(pmu, cntr_mask64)); +} + +static inline int x86_pmu_max_num_counters(struct pmu *pmu) +{ + return fls64(hybrid(pmu, cntr_mask64)); +} + +static inline int x86_pmu_num_counters_fixed(struct pmu *pmu) +{ + return hweight64(hybrid(pmu, fixed_cntr_mask64)); +} + +static inline int x86_pmu_max_num_counters_fixed(struct pmu *pmu) +{ + return fls64(hybrid(pmu, fixed_cntr_mask64)); +}
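The new accessors make a distinction the old integer fields could not express: the number of counters (population count) versus the highest counter index (find-last-set). The two diverge as soon as a mask has holes. Sketch:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t mask = 0xf3;	/* bits 0,1,4..7: counters 2,3 absent */

		int num = __builtin_popcountll(mask);	/* hweight64() -> 6 */
		int max = 64 - __builtin_clzll(mask);	/* fls64()     -> 8 */

		/* Scheduling limits want 'num'; loop bounds and array
		 * sizing want 'max', since counter 7 is real even though
		 * counter 2 is not. */
		printf("num=%d max=%d\n", num, max);
		return 0;
	}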
extern struct event_constraint emptyconstraint;
@@ -1661,6 +1692,17 @@ static inline int is_ht_workaround_enabled(void) return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED); }
+static inline u64 intel_pmu_pebs_mask(u64 cntr_mask) +{ + return MAX_PEBS_EVENTS_MASK & cntr_mask; +} + +static inline int intel_pmu_max_num_pebs(struct pmu *pmu) +{ + static_assert(MAX_PEBS_EVENTS == 32); + return fls((u32)hybrid(pmu, pebs_events_mask)); +} + #else /* CONFIG_CPU_SUP_INTEL */
static inline void reserve_ds_buffers(void) diff --git a/arch/x86/events/zhaoxin/core.c b/arch/x86/events/zhaoxin/core.c index 3e9acdaeed1e..2fd9b0cf9a5e 100644 --- a/arch/x86/events/zhaoxin/core.c +++ b/arch/x86/events/zhaoxin/core.c @@ -530,13 +530,13 @@ __init int zhaoxin_pmu_init(void) pr_info("Version check pass!\n");
x86_pmu.version = version; - x86_pmu.num_counters = eax.split.num_counters; + x86_pmu.cntr_mask64 = GENMASK_ULL(eax.split.num_counters - 1, 0); x86_pmu.cntval_bits = eax.split.bit_width; x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1; x86_pmu.events_maskl = ebx.full; x86_pmu.events_mask_len = eax.split.mask_length;
- x86_pmu.num_counters_fixed = edx.split.num_counters_fixed; + x86_pmu.fixed_cntr_mask64 = GENMASK_ULL(edx.split.num_counters_fixed - 1, 0); x86_add_quirk(zhaoxin_arch_events_quirk);
switch (boot_cpu_data.x86) { @@ -604,13 +604,13 @@ __init int zhaoxin_pmu_init(void) return -ENODEV; }
- x86_pmu.intel_ctrl = (1 << (x86_pmu.num_counters)) - 1; - x86_pmu.intel_ctrl |= ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED; + x86_pmu.intel_ctrl = x86_pmu.cntr_mask64; + x86_pmu.intel_ctrl |= x86_pmu.fixed_cntr_mask64 << INTEL_PMC_IDX_FIXED;
if (x86_pmu.event_constraints) { for_each_event_constraint(c, x86_pmu.event_constraints) { - c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1; - c->weight += x86_pmu.num_counters; + c->idxmsk64 |= x86_pmu.cntr_mask64; + c->weight += x86_pmu_num_counters(NULL); } }
diff --git a/arch/x86/include/asm/intel_ds.h b/arch/x86/include/asm/intel_ds.h index 2f9eeb5c3069..5dbeac48a5b9 100644 --- a/arch/x86/include/asm/intel_ds.h +++ b/arch/x86/include/asm/intel_ds.h @@ -9,6 +9,7 @@ /* The maximal number of PEBS events: */ #define MAX_PEBS_EVENTS_FMT4 8 #define MAX_PEBS_EVENTS 32 +#define MAX_PEBS_EVENTS_MASK GENMASK_ULL(MAX_PEBS_EVENTS - 1, 0) #define MAX_FIXED_PEBS_EVENTS 16
/* diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h index a053c1293975..68da67df304d 100644 --- a/arch/x86/include/asm/qspinlock.h +++ b/arch/x86/include/asm/qspinlock.h @@ -66,13 +66,15 @@ static inline bool vcpu_is_preempted(long cpu)
#ifdef CONFIG_PARAVIRT /* - * virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack. + * virt_spin_lock_key - disables by default the virt_spin_lock() hijack. * - * Native (and PV wanting native due to vCPU pinning) should disable this key. - * It is done in this backwards fashion to only have a single direction change, - * which removes ordering between native_pv_spin_init() and HV setup. + * Native (and PV wanting native due to vCPU pinning) should keep this key + * disabled. Native does not touch the key. + * + * When in a guest then native_pv_lock_init() enables the key first and + * KVM/XEN might conditionally disable it later in the boot process again. */ -DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key); +DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);
/* * Shortcut for the queued_spin_lock_slowpath() function that allows diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c index 767bf1c71aad..2a2fc14955cd 100644 --- a/arch/x86/kernel/cpu/mtrr/mtrr.c +++ b/arch/x86/kernel/cpu/mtrr/mtrr.c @@ -609,7 +609,7 @@ void mtrr_save_state(void) { int first_cpu;
- if (!mtrr_enabled()) + if (!mtrr_enabled() || !mtrr_state.have_fixed) return;
first_cpu = cpumask_first(cpu_online_mask); diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 5358d43886ad..fec381533555 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -51,13 +51,12 @@ DEFINE_ASM_FUNC(pv_native_irq_enable, "sti", .noinstr.text); DEFINE_ASM_FUNC(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text); #endif
-DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key); +DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);
void __init native_pv_lock_init(void) { - if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) && - !boot_cpu_has(X86_FEATURE_HYPERVISOR)) - static_branch_disable(&virt_spin_lock_key); + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) + static_branch_enable(&virt_spin_lock_key); }
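Flipping virt_spin_lock_key from default-true to default-false inverts who has to act: bare metal now never writes the key at all, a guest enables it in native_pv_lock_init(), and, per the comment above, KVM/Xen may conditionally disable it again later in boot. A toy model of the new flow (a plain bool stands in for the jump-label machinery):

	#include <stdbool.h>
	#include <stdio.h>

	static bool virt_spin_lock_key;	/* models DEFINE_STATIC_KEY_FALSE: off */

	static void native_pv_lock_init(bool hypervisor)
	{
		/* Only guests touch the key; native boots never do, so no
		 * enable-then-disable ordering against PV setup remains. */
		if (hypervisor)
			virt_spin_lock_key = true;
	}

	int main(void)
	{
		native_pv_lock_init(false);
		printf("bare metal: hijack %s\n", virt_spin_lock_key ? "on" : "off");
		native_pv_lock_init(true);
		printf("guest:      hijack %s\n", virt_spin_lock_key ? "on" : "off");
		return 0;
	}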
static void native_tlb_remove_table(struct mmu_gather *tlb, void *table) diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index 2e69abf4f852..bfdf5f45b137 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c @@ -374,14 +374,14 @@ pti_clone_pgtable(unsigned long start, unsigned long end, */ *target_pmd = *pmd;
- addr += PMD_SIZE; + addr = round_up(addr + 1, PMD_SIZE);
} else if (level == PTI_CLONE_PTE) {
/* Walk the page-table down to the pte level */ pte = pte_offset_kernel(pmd, addr); if (pte_none(*pte)) { - addr += PAGE_SIZE; + addr = round_up(addr + 1, PAGE_SIZE); continue; }
@@ -401,7 +401,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end, /* Clone the PTE */ *target_pte = *pte;
- addr += PAGE_SIZE; + addr = round_up(addr + 1, PAGE_SIZE);
} else { BUG(); @@ -496,7 +496,7 @@ static void pti_clone_entry_text(void) { pti_clone_pgtable((unsigned long) __entry_text_start, (unsigned long) __entry_text_end, - PTI_CLONE_PMD); + PTI_LEVEL_KERNEL_IMAGE); }
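The pti_clone_pgtable() change matters when the walk starts at an address that is not PMD- or PTE-aligned: "addr += PMD_SIZE" preserves the misalignment forever, while round_up(addr + 1, PMD_SIZE) snaps to the next boundary. A worked example (round_up() open-coded the way the kernel defines it for power-of-two sizes):

	#include <stdio.h>

	#define PMD_SIZE	(2UL << 20)	/* 2 MiB on x86-64 */
	#define round_up(x, y)	((((x) - 1) | ((y) - 1)) + 1)

	int main(void)
	{
		unsigned long addr = 0x200000UL + 0x1000UL;	/* unaligned start */

		printf("old: %#lx\n", addr + PMD_SIZE);		/* 0x401000, still unaligned */
		printf("new: %#lx\n", round_up(addr + 1, PMD_SIZE)); /* 0x400000, aligned */
		return 0;
	}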
/* diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index b379401ff1c2..44ca989f1646 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -678,12 +678,18 @@ static ssize_t acpi_battery_alarm_store(struct device *dev, return count; }
-static const struct device_attribute alarm_attr = { +static struct device_attribute alarm_attr = { .attr = {.name = "alarm", .mode = 0644}, .show = acpi_battery_alarm_show, .store = acpi_battery_alarm_store, };
+static struct attribute *acpi_battery_attrs[] = { + &alarm_attr.attr, + NULL +}; +ATTRIBUTE_GROUPS(acpi_battery); + /* * The Battery Hooking API * @@ -823,7 +829,10 @@ static void __exit battery_hook_exit(void)
static int sysfs_add_battery(struct acpi_battery *battery) { - struct power_supply_config psy_cfg = { .drv_data = battery, }; + struct power_supply_config psy_cfg = { + .drv_data = battery, + .attr_grp = acpi_battery_groups, + }; bool full_cap_broken = false;
if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) && @@ -868,7 +877,7 @@ static int sysfs_add_battery(struct acpi_battery *battery) return result; } battery_hook_add_battery(battery); - return device_create_file(&battery->bat->dev, &alarm_attr); + return 0; }
static void sysfs_remove_battery(struct acpi_battery *battery) @@ -879,7 +888,6 @@ static void sysfs_remove_battery(struct acpi_battery *battery) return; } battery_hook_remove_battery(battery); - device_remove_file(&battery->bat->dev, &alarm_attr); power_supply_unregister(battery->bat); battery->bat = NULL; mutex_unlock(&battery->sysfs_lock); diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index b5bf8b81a050..df5d5a554b38 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -524,6 +524,20 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = { DMI_MATCH(DMI_BOARD_NAME, "N6506MV"), }, }, + { + /* Asus Vivobook Pro N6506MU */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "N6506MU"), + }, + }, + { + /* Asus Vivobook Pro N6506MJ */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "N6506MJ"), + }, + }, { /* LG Electronics 17U70P */ .matches = { diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c index dc8164b182dc..442c5905d43b 100644 --- a/drivers/acpi/sbs.c +++ b/drivers/acpi/sbs.c @@ -77,7 +77,6 @@ struct acpi_battery { u16 spec; u8 id; u8 present:1; - u8 have_sysfs_alarm:1; };
#define to_acpi_battery(x) power_supply_get_drvdata(x) @@ -462,12 +461,18 @@ static ssize_t acpi_battery_alarm_store(struct device *dev, return count; }
-static const struct device_attribute alarm_attr = { +static struct device_attribute alarm_attr = { .attr = {.name = "alarm", .mode = 0644}, .show = acpi_battery_alarm_show, .store = acpi_battery_alarm_store, };
+static struct attribute *acpi_battery_attrs[] = { + &alarm_attr.attr, + NULL +}; +ATTRIBUTE_GROUPS(acpi_battery); + /* -------------------------------------------------------------------------- Driver Interface -------------------------------------------------------------------------- */ @@ -518,7 +523,10 @@ static int acpi_battery_read(struct acpi_battery *battery) static int acpi_battery_add(struct acpi_sbs *sbs, int id) { struct acpi_battery *battery = &sbs->battery[id]; - struct power_supply_config psy_cfg = { .drv_data = battery, }; + struct power_supply_config psy_cfg = { + .drv_data = battery, + .attr_grp = acpi_battery_groups, + }; int result;
battery->id = id; @@ -548,10 +556,6 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id) goto end; }
- result = device_create_file(&battery->bat->dev, &alarm_attr); - if (result) - goto end; - battery->have_sysfs_alarm = 1; end: pr_info("%s [%s]: Battery Slot [%s] (battery %s)\n", ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device), @@ -563,11 +567,8 @@ static void acpi_battery_remove(struct acpi_sbs *sbs, int id) { struct acpi_battery *battery = &sbs->battery[id];
- if (battery->bat) { - if (battery->have_sysfs_alarm) - device_remove_file(&battery->bat->dev, &alarm_attr); + if (battery->bat) power_supply_unregister(battery->bat); - } }
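Both ACPI battery drivers stop creating the "alarm" attribute by hand and instead pass an attribute group to the power-supply core via .attr_grp, so the sysfs file's lifetime exactly matches the device's. For reference, ATTRIBUTE_GROUPS(acpi_battery) expands to roughly the following (paraphrased from <linux/sysfs.h>; the exact expansion can vary by kernel version):

	static const struct attribute_group acpi_battery_group = {
		.attrs = acpi_battery_attrs,
	};

	static const struct attribute_group *acpi_battery_groups[] = {
		&acpi_battery_group,
		NULL,
	};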
static int acpi_charger_add(struct acpi_sbs *sbs) diff --git a/drivers/base/core.c b/drivers/base/core.c index 2b4c0624b704..b5399262198a 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -25,6 +25,7 @@ #include <linux/mutex.h> #include <linux/pm_runtime.h> #include <linux/netdevice.h> +#include <linux/rcupdate.h> #include <linux/sched/signal.h> #include <linux/sched/mm.h> #include <linux/string_helpers.h> @@ -2640,6 +2641,7 @@ static const char *dev_uevent_name(const struct kobject *kobj) static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) { const struct device *dev = kobj_to_dev(kobj); + struct device_driver *driver; int retval = 0;
/* add device node properties if present */ @@ -2668,8 +2670,12 @@ static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) if (dev->type && dev->type->name) add_uevent_var(env, "DEVTYPE=%s", dev->type->name);
- if (dev->driver) - add_uevent_var(env, "DRIVER=%s", dev->driver->name); + /* Synchronize with module_remove_driver() */ + rcu_read_lock(); + driver = READ_ONCE(dev->driver); + if (driver) + add_uevent_var(env, "DRIVER=%s", driver->name); + rcu_read_unlock();
/* Add common DT information about the device */ of_device_uevent(dev, env); @@ -2739,11 +2745,8 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr, if (!env) return -ENOMEM;
- /* Synchronize with really_probe() */ - device_lock(dev); /* let the kset specific function add its keys */ retval = kset->uevent_ops->uevent(&dev->kobj, env); - device_unlock(dev); if (retval) goto out;
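The dev_uevent() change above pairs with the module.c change below: readers dereference dev->driver inside an RCU read-side section, and the remover waits for all such sections before tearing the driver down, replacing the device_lock() that uevent_show() used to take. The shape of the pattern (kernel-style sketch; use() and free_driver_resources() are placeholders, and the actual NULL store on dev->driver happens elsewhere in the driver core):

	/* reader */
	rcu_read_lock();
	driver = READ_ONCE(dev->driver);
	if (driver)
		use(driver->name);	/* safe until rcu_read_unlock() */
	rcu_read_unlock();

	/* remover */
	WRITE_ONCE(dev->driver, NULL);
	synchronize_rcu();	/* every reader that saw the old pointer is done */
	free_driver_resources();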
diff --git a/drivers/base/module.c b/drivers/base/module.c index a1b55da07127..b0b79b9c189d 100644 --- a/drivers/base/module.c +++ b/drivers/base/module.c @@ -7,6 +7,7 @@ #include <linux/errno.h> #include <linux/slab.h> #include <linux/string.h> +#include <linux/rcupdate.h> #include "base.h"
static char *make_driver_name(struct device_driver *drv) @@ -97,6 +98,9 @@ void module_remove_driver(struct device_driver *drv) if (!drv) return;
+ /* Synchronize with dev_uevent() */ + synchronize_rcu(); + sysfs_remove_link(&drv->p->kobj, "module");
if (drv->owner) diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c index be32cd4e84da..292e86f60197 100644 --- a/drivers/base/regmap/regmap-kunit.c +++ b/drivers/base/regmap/regmap-kunit.c @@ -145,9 +145,9 @@ static struct regmap *gen_regmap(struct kunit *test, const struct regmap_test_param *param = test->param_value; struct regmap_test_priv *priv = test->priv; unsigned int *buf; - struct regmap *ret; + struct regmap *ret = ERR_PTR(-ENOMEM); size_t size; - int i; + int i, error; struct reg_default *defaults;
config->cache_type = param->cache; @@ -172,15 +172,17 @@ static struct regmap *gen_regmap(struct kunit *test,
*data = kzalloc(sizeof(**data), GFP_KERNEL); if (!(*data)) - return ERR_PTR(-ENOMEM); + goto out_free; (*data)->vals = buf;
if (config->num_reg_defaults) { - defaults = kcalloc(config->num_reg_defaults, - sizeof(struct reg_default), - GFP_KERNEL); + defaults = kunit_kcalloc(test, + config->num_reg_defaults, + sizeof(struct reg_default), + GFP_KERNEL); if (!defaults) - return ERR_PTR(-ENOMEM); + goto out_free; + config->reg_defaults = defaults;
for (i = 0; i < config->num_reg_defaults; i++) { @@ -190,12 +192,19 @@ static struct regmap *gen_regmap(struct kunit *test, }
ret = regmap_init_ram(priv->dev, config, *data); - if (IS_ERR(ret)) { - kfree(buf); - kfree(*data); - } else { - kunit_add_action(test, regmap_exit_action, ret); - } + if (IS_ERR(ret)) + goto out_free; + + /* This calls regmap_exit() on failure, which frees buf and *data */ + error = kunit_add_action_or_reset(test, regmap_exit_action, ret); + if (error) + ret = ERR_PTR(error); + + return ret; + +out_free: + kfree(buf); + kfree(*data);
return ret; } @@ -1497,9 +1506,9 @@ static struct regmap *gen_raw_regmap(struct kunit *test, struct regmap_test_priv *priv = test->priv; const struct regmap_test_param *param = test->param_value; u16 *buf; - struct regmap *ret; + struct regmap *ret = ERR_PTR(-ENOMEM); size_t size = (config->max_register + 1) * config->reg_bits / 8; - int i; + int i, error; struct reg_default *defaults;
config->cache_type = param->cache; @@ -1515,15 +1524,16 @@ static struct regmap *gen_raw_regmap(struct kunit *test,
*data = kzalloc(sizeof(**data), GFP_KERNEL); if (!(*data)) - return ERR_PTR(-ENOMEM); + goto out_free; (*data)->vals = (void *)buf;
config->num_reg_defaults = config->max_register + 1; - defaults = kcalloc(config->num_reg_defaults, - sizeof(struct reg_default), - GFP_KERNEL); + defaults = kunit_kcalloc(test, + config->num_reg_defaults, + sizeof(struct reg_default), + GFP_KERNEL); if (!defaults) - return ERR_PTR(-ENOMEM); + goto out_free; config->reg_defaults = defaults;
for (i = 0; i < config->num_reg_defaults; i++) { @@ -1536,7 +1546,8 @@ static struct regmap *gen_raw_regmap(struct kunit *test, defaults[i].def = be16_to_cpu(buf[i]); break; default: - return ERR_PTR(-EINVAL); + ret = ERR_PTR(-EINVAL); + goto out_free; } }
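The error handling in these helpers also switches from kunit_add_action() to kunit_add_action_or_reset(): the latter runs the deferred action immediately when registering it fails, so the caller needs no separate cleanup branch for that case. Usage shape (sketch, restating the hunks around it):

	/* On failure this has already called regmap_exit(ret), which in
	 * turn frees buf and *data, so only the error code remains to
	 * be propagated. */
	error = kunit_add_action_or_reset(test, regmap_exit_action, ret);
	if (error)
		ret = ERR_PTR(error);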
@@ -1548,12 +1559,19 @@ static struct regmap *gen_raw_regmap(struct kunit *test, config->num_reg_defaults = 0;
ret = regmap_init_raw_ram(priv->dev, config, *data); - if (IS_ERR(ret)) { - kfree(buf); - kfree(*data); - } else { - kunit_add_action(test, regmap_exit_action, ret); - } + if (IS_ERR(ret)) + goto out_free; + + /* This calls regmap_exit() on failure, which frees buf and *data */ + error = kunit_add_action_or_reset(test, regmap_exit_action, ret); + if (error) + ret = ERR_PTR(error); + + return ret; + +out_free: + kfree(buf); + kfree(*data);
return ret; } diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c index 6a863328b805..d310b525fbf0 100644 --- a/drivers/bluetooth/btnxpuart.c +++ b/drivers/bluetooth/btnxpuart.c @@ -344,7 +344,7 @@ static void ps_cancel_timer(struct btnxpuart_dev *nxpdev) struct ps_data *psdata = &nxpdev->psdata;
flush_work(&psdata->work); - del_timer_sync(&psdata->ps_timer); + timer_shutdown_sync(&psdata->ps_timer); }
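del_timer_sync() only waits for a running handler; it does not stop anyone re-arming the timer afterwards. timer_shutdown_sync() additionally marks the timer shut down, turning any later mod_timer()/add_timer() into a no-op, which is the right tool once the device is going away. Teardown ordering as used above:

	flush_work(&psdata->work);		/* work item can no longer arm it */
	timer_shutdown_sync(&psdata->ps_timer);	/* wait, and forbid re-arming  */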
static void ps_control(struct hci_dev *hdev, u8 ps_state) diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index 26919556ef5f..b72b36e0abed 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -528,6 +528,7 @@ static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta) static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id) { struct sh_cmt_channel *ch = dev_id; + unsigned long flags;
/* clear flags */ sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) & @@ -558,6 +559,8 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
ch->flags &= ~FLAG_SKIPEVENT;
+ raw_spin_lock_irqsave(&ch->lock, flags); + if (ch->flags & FLAG_REPROGRAM) { ch->flags &= ~FLAG_REPROGRAM; sh_cmt_clock_event_program_verify(ch, 1); @@ -570,6 +573,8 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
ch->flags &= ~FLAG_IRQCONTEXT;
+ raw_spin_unlock_irqrestore(&ch->lock, flags); + return IRQ_HANDLED; }
@@ -780,12 +785,18 @@ static int sh_cmt_clock_event_next(unsigned long delta, struct clock_event_device *ced) { struct sh_cmt_channel *ch = ced_to_sh_cmt(ced); + unsigned long flags;
BUG_ON(!clockevent_state_oneshot(ced)); + + raw_spin_lock_irqsave(&ch->lock, flags); + if (likely(ch->flags & FLAG_IRQCONTEXT)) ch->next_match_value = delta - 1; else - sh_cmt_set_next(ch, delta - 1); + __sh_cmt_set_next(ch, delta - 1); + + raw_spin_unlock_irqrestore(&ch->lock, flags);
return 0; } diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index a092b13ffbc2..67c4a6a0ef12 100644 --- a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -304,10 +304,8 @@ static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata, int epp = -EINVAL; int ret;
- if (!pref_index) { - pr_debug("EPP pref_index is invalid\n"); - return -EINVAL; - } + if (!pref_index) + epp = cpudata->epp_default;
if (epp == -EINVAL) epp = epp_values[pref_index]; @@ -1439,7 +1437,7 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
policy->driver_data = cpudata;
- cpudata->epp_cached = amd_pstate_get_epp(cpudata, 0); + cpudata->epp_cached = cpudata->epp_default = amd_pstate_get_epp(cpudata, 0);
policy->min = policy->cpuinfo.min_freq; policy->max = policy->cpuinfo.max_freq; @@ -1766,8 +1764,13 @@ static int __init amd_pstate_init(void) /* check if this machine need CPPC quirks */ dmi_check_system(amd_pstate_quirks_table);
- switch (cppc_state) { - case AMD_PSTATE_UNDEFINED: + /* + * determine the driver mode from the command line or kernel config. + * If no command line input is provided, cppc_state will be AMD_PSTATE_UNDEFINED. + * command line options will override the kernel config settings. + */ + + if (cppc_state == AMD_PSTATE_UNDEFINED) { /* Disable on the following configs by default: * 1. Undefined platforms * 2. Server platforms @@ -1779,15 +1782,20 @@ static int __init amd_pstate_init(void) pr_info("driver load is disabled, boot with specific mode to enable this\n"); return -ENODEV; } - ret = amd_pstate_set_driver(CONFIG_X86_AMD_PSTATE_DEFAULT_MODE); - if (ret) - return ret; - break; + /* get driver mode from kernel config option [1:4] */ + cppc_state = CONFIG_X86_AMD_PSTATE_DEFAULT_MODE; + } + + switch (cppc_state) { case AMD_PSTATE_DISABLE: + pr_info("driver load is disabled, boot with specific mode to enable this\n"); return -ENODEV; case AMD_PSTATE_PASSIVE: case AMD_PSTATE_ACTIVE: case AMD_PSTATE_GUIDED: + ret = amd_pstate_set_driver(cppc_state); + if (ret) + return ret; break; default: return -EINVAL; @@ -1808,7 +1816,7 @@ static int __init amd_pstate_init(void) /* enable amd pstate feature */ ret = amd_pstate_enable(true); if (ret) { - pr_err("failed to enable with return %d\n", ret); + pr_err("failed to enable driver mode(%d)\n", cppc_state); return ret; }
diff --git a/drivers/cpufreq/amd-pstate.h b/drivers/cpufreq/amd-pstate.h index e6a28e7f4dbf..f80b33fa5d43 100644 --- a/drivers/cpufreq/amd-pstate.h +++ b/drivers/cpufreq/amd-pstate.h @@ -99,6 +99,7 @@ struct amd_cpudata { u32 policy; u64 cppc_cap1_cached; bool suspended; + s16 epp_default; };
#endif /* _LINUX_AMD_PSTATE_H */ diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index fa62367ee929..1a9aadd4c803 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -17,6 +17,7 @@ #include <linux/list.h> #include <linux/lockdep.h> #include <linux/module.h> +#include <linux/nospec.h> #include <linux/of.h> #include <linux/pinctrl/consumer.h> #include <linux/seq_file.h> @@ -198,7 +199,7 @@ gpio_device_get_desc(struct gpio_device *gdev, unsigned int hwnum) if (hwnum >= gdev->ngpio) return ERR_PTR(-EINVAL);
- return &gdev->descs[hwnum]; + return &gdev->descs[array_index_nospec(hwnum, gdev->ngpio)]; } EXPORT_SYMBOL_GPL(gpio_device_get_desc);
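gpio_device_get_desc() already bounds-checks hwnum; array_index_nospec() additionally clamps the value with a data-dependent mask so a mispredicted branch cannot speculatively index past gdev->descs (Spectre v1). A user-space sketch of the idea (not the kernel's exact implementation; assumes idx and size stay below LONG_MAX):

	#include <stddef.h>
	#include <stdio.h>

	/* Returns idx when idx < size, 0 otherwise, without a branch
	 * the CPU could mispredict. */
	static size_t index_nospec(size_t idx, size_t size)
	{
		size_t mask = (size_t)((long)(idx - size) >>
				       (sizeof(long) * 8 - 1));
		return idx & mask;
	}

	int main(void)
	{
		printf("%zu %zu\n", index_nospec(3, 8),	/* 3 */
		       index_nospec(9, 8));		/* 0 */
		return 0;
	}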
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index ee7df1d84e02..89cf9ac6da17 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4048,6 +4048,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, mutex_init(&adev->grbm_idx_mutex); mutex_init(&adev->mn_lock); mutex_init(&adev->virt.vf_errors.lock); + mutex_init(&adev->virt.rlcg_reg_lock); hash_init(adev->mn_hash); mutex_init(&adev->psp.mutex); mutex_init(&adev->notifier_lock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index e4742b65032d..4a9cec002691 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -262,9 +262,8 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job, struct dma_fence *fence = NULL; int r;
- /* Ignore soft recovered fences here */ r = drm_sched_entity_error(s_entity); - if (r && r != -ENODATA) + if (r) goto error;
if (!fence && job->gang_submit) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c index ca5c86e5f7cd..8e8afbd237bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c @@ -334,7 +334,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
set_ta_context_funcs(psp, ta_type, &context);
- if (!context->initialized) { + if (!context || !context->initialized) { dev_err(adev->dev, "TA is not initialized\n"); ret = -EINVAL; goto err_free_shared_buf; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 1adc81a55734..0c4ee06451e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -2172,12 +2172,15 @@ static void amdgpu_ras_interrupt_process_handler(struct work_struct *work) int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev, struct ras_dispatch_if *info) { - struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); - struct ras_ih_data *data = &obj->ih_data; + struct ras_manager *obj; + struct ras_ih_data *data;
+ obj = amdgpu_ras_find_obj(adev, &info->head); if (!obj) return -EINVAL;
+ data = &obj->ih_data; + if (data->inuse == 0) return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 54ab51a4ada7..972a58f0f492 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -980,6 +980,9 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1; scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2; scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3; + + mutex_lock(&adev->virt.rlcg_reg_lock); + if (reg_access_ctrl->spare_int) spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;
@@ -1036,6 +1039,9 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f }
ret = readl(scratch_reg0); + + mutex_unlock(&adev->virt.rlcg_reg_lock); + return ret; }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 642f1fd287d8..0ec246c74570 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -272,6 +272,8 @@ struct amdgpu_virt {
/* the ucode id to signal the autoload */ uint32_t autoload_ucode_id; + + struct mutex rlcg_reg_lock; };
struct amdgpu_video_codec_info; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c index 66e8a016126b..9b748d7058b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c @@ -102,6 +102,11 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p, if (!r) r = amdgpu_sync_push_to_job(&sync, p->job); amdgpu_sync_free(&sync); + + if (r) { + p->num_dw_left = 0; + amdgpu_job_free(p->job); + } return r; }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 31e500859ab0..92485251247a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1658,7 +1658,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm, start = map_start << PAGE_SHIFT; end = (map_last + 1) << PAGE_SHIFT; for (addr = start; !r && addr < end; ) { - struct hmm_range *hmm_range; + struct hmm_range *hmm_range = NULL; unsigned long map_start_vma; unsigned long map_last_vma; struct vm_area_struct *vma; @@ -1696,7 +1696,12 @@ static int svm_range_validate_and_map(struct mm_struct *mm, }
svm_range_lock(prange); - if (!r && amdgpu_hmm_range_get_pages_done(hmm_range)) { + + /* Free backing memory of hmm_range if it was initialized + * Override return value to TRY AGAIN only if prior returns + * were successful + */ + if (hmm_range && amdgpu_hmm_range_get_pages_done(hmm_range) && !r) { pr_debug("hmm update the range, need validate again\n"); r = -EAGAIN; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 3cdcadd41be1..964bb6d0a383 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2701,7 +2701,8 @@ static int dm_suspend(void *handle)
dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state);
- dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false); + if (dm->cached_dc_state) + dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
amdgpu_dm_commit_zero_streams(dm->dc);
@@ -6788,7 +6789,8 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector) aconnector->dc_sink = aconnector->dc_link->local_sink ? aconnector->dc_link->local_sink : aconnector->dc_em_sink; - dc_sink_retain(aconnector->dc_sink); + if (aconnector->dc_sink) + dc_sink_retain(aconnector->dc_sink); } }
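dc_sink_retain() bumps a refcount through its argument unconditionally, so when both local_sink and dc_em_sink can be NULL the caller has to guard. The pattern, reduced to a standalone sketch (struct sink is hypothetical):

#include <stdio.h>

struct sink { int refcount; };

static void sink_retain(struct sink *s)
{
        s->refcount++;                /* would fault on NULL */
}

int main(void)
{
        struct sink em_sink = { .refcount = 1 };
        struct sink *dc_sink = &em_sink;    /* may legitimately be NULL */

        if (dc_sink)                  /* guard, as in the hunk above */
                sink_retain(dc_sink);
        printf("%d\n", em_sink.refcount);   /* 2 */
        return 0;
}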
@@ -7615,7 +7617,8 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) drm_add_modes_noedid(connector, 1920, 1080); } else { amdgpu_dm_connector_ddc_get_modes(connector, edid); - amdgpu_dm_connector_add_common_modes(encoder, connector); + if (encoder) + amdgpu_dm_connector_add_common_modes(encoder, connector); amdgpu_dm_connector_add_freesync_modes(connector, edid); } amdgpu_dm_fbc_init(connector); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index a5e1a93ddaea..bb4e5ab7edc6 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -182,6 +182,8 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector) dc_sink_release(dc_sink); aconnector->dc_sink = NULL; aconnector->edid = NULL; + aconnector->dsc_aux = NULL; + port->passthrough_aux = NULL; }
aconnector->mst_status = MST_STATUS_DEFAULT; @@ -494,6 +496,8 @@ dm_dp_mst_detect(struct drm_connector *connector, dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; aconnector->edid = NULL; + aconnector->dsc_aux = NULL; + port->passthrough_aux = NULL;
amdgpu_dm_set_mst_status(&aconnector->mst_status, MST_REMOTE_EDID | MST_ALLOCATE_NEW_PAYLOAD | MST_CLEAR_ALLOCATED_PAYLOAD, @@ -1233,14 +1237,6 @@ static bool is_dsc_need_re_compute( if (!aconnector || !aconnector->dsc_aux) continue;
- /* - * check if cached virtual MST DSC caps are available and DSC is supported - * as per specifications in their Virtual DPCD registers. - */ - if (!(aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported || - aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT)) - continue; - stream_on_link[new_stream_on_link_num] = aconnector; new_stream_on_link_num++;
@@ -1268,6 +1264,9 @@ static bool is_dsc_need_re_compute( } }
+ if (new_stream_on_link_num == 0) + return false; + /* check current_state if there stream on link but it is not in * new request state */ @@ -1595,109 +1594,171 @@ static bool is_dsc_common_config_possible(struct dc_stream_state *stream, return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16; }
+#if defined(CONFIG_DRM_AMD_DC_FP) +static bool dp_get_link_current_set_bw(struct drm_dp_aux *aux, uint32_t *cur_link_bw) +{ + uint32_t total_data_bw_efficiency_x10000 = 0; + uint32_t link_rate_per_lane_kbps = 0; + enum dc_link_rate link_rate; + union lane_count_set lane_count; + u8 dp_link_encoding; + u8 link_bw_set = 0; + + *cur_link_bw = 0; + + if (drm_dp_dpcd_read(aux, DP_MAIN_LINK_CHANNEL_CODING_SET, &dp_link_encoding, 1) != 1 || + drm_dp_dpcd_read(aux, DP_LANE_COUNT_SET, &lane_count.raw, 1) != 1 || + drm_dp_dpcd_read(aux, DP_LINK_BW_SET, &link_bw_set, 1) != 1) + return false; + + switch (dp_link_encoding) { + case DP_8b_10b_ENCODING: + link_rate = link_bw_set; + link_rate_per_lane_kbps = link_rate * LINK_RATE_REF_FREQ_IN_KHZ * BITS_PER_DP_BYTE; + total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_8b_10b_x10000; + total_data_bw_efficiency_x10000 /= 100; + total_data_bw_efficiency_x10000 *= DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100; + break; + case DP_128b_132b_ENCODING: + switch (link_bw_set) { + case DP_LINK_BW_10: + link_rate = LINK_RATE_UHBR10; + break; + case DP_LINK_BW_13_5: + link_rate = LINK_RATE_UHBR13_5; + break; + case DP_LINK_BW_20: + link_rate = LINK_RATE_UHBR20; + break; + default: + return false; + } + + link_rate_per_lane_kbps = link_rate * 10000; + total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_128b_132b_x10000; + break; + default: + return false; + } + + *cur_link_bw = link_rate_per_lane_kbps * lane_count.bits.LANE_COUNT_SET / 10000 * total_data_bw_efficiency_x10000; + return true; +} +#endif + enum dc_status dm_dp_mst_is_port_support_mode( struct amdgpu_dm_connector *aconnector, struct dc_stream_state *stream) { - int pbn, branch_max_throughput_mps = 0; +#if defined(CONFIG_DRM_AMD_DC_FP) + int branch_max_throughput_mps = 0; struct dc_link_settings cur_link_settings; - unsigned int end_to_end_bw_in_kbps = 0; - unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0; + uint32_t end_to_end_bw_in_kbps = 0; + uint32_t root_link_bw_in_kbps = 0; + uint32_t virtual_channel_bw_in_kbps = 0; struct dc_dsc_bw_range bw_range = {0}; struct dc_dsc_config_options dsc_options = {0}; + uint32_t stream_kbps;
- /* - * Consider the case with the depth of the mst topology tree is equal or less than 2 - * A. When dsc bitstream can be transmitted along the entire path - * 1. dsc is possible between source and branch/leaf device (common dsc params is possible), AND - * 2. dsc passthrough supported at MST branch, or - * 3. dsc decoding supported at leaf MST device - * Use maximum dsc compression as bw constraint - * B. When dsc bitstream cannot be transmitted along the entire path - * Use native bw as bw constraint + /* DSC unnecessary case + * Check if timing could be supported within end-to-end BW */ - if (is_dsc_common_config_possible(stream, &bw_range) && - (aconnector->mst_output_port->passthrough_aux || - aconnector->dsc_aux == &aconnector->mst_output_port->aux)) { - cur_link_settings = stream->link->verified_link_cap; - upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings); - down_link_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn); - - /* pick the end to end bw bottleneck */ - end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps, down_link_bw_in_kbps); - - if (end_to_end_bw_in_kbps < bw_range.min_kbps) { - DRM_DEBUG_DRIVER("maximum dsc compression cannot fit into end-to-end bandwidth\n"); + stream_kbps = + dc_bandwidth_in_kbps_from_timing(&stream->timing, + dc_link_get_highest_encoding_format(stream->link)); + cur_link_settings = stream->link->verified_link_cap; + root_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings); + virtual_channel_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn); + + /* pick the end to end bw bottleneck */ + end_to_end_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps); + + if (stream_kbps <= end_to_end_bw_in_kbps) { + DRM_DEBUG_DRIVER("No DSC needed. End-to-end bw sufficient."); + return DC_OK; + } + + /*DSC necessary case*/ + if (!aconnector->dsc_aux) + return DC_FAIL_BANDWIDTH_VALIDATE; + + if (is_dsc_common_config_possible(stream, &bw_range)) { + + /*capable of dsc passthrough. dsc bitstream along the entire path*/ + if (aconnector->mst_output_port->passthrough_aux) { + if (bw_range.min_kbps > end_to_end_bw_in_kbps) { + DRM_DEBUG_DRIVER("DSC passthrough. Max dsc compression can't fit into end-to-end bw\n"); return DC_FAIL_BANDWIDTH_VALIDATE; - } + } + } else { + /*dsc bitstream decoded at the dp last link*/ + struct drm_dp_mst_port *immediate_upstream_port = NULL; + uint32_t end_link_bw = 0; + + /*Get last DP link BW capability*/ + if (dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw)) { + if (stream_kbps > end_link_bw) { + DRM_DEBUG_DRIVER("DSC decode at last link. Mode required bw can't fit into available bw\n"); + return DC_FAIL_BANDWIDTH_VALIDATE; + } + }
- if (end_to_end_bw_in_kbps < bw_range.stream_kbps) { - dc_dsc_get_default_config_option(stream->link->dc, &dsc_options); - dsc_options.max_target_bpp_limit_override_x16 = aconnector->base.display_info.max_dsc_bpp * 16; - if (dc_dsc_compute_config(stream->sink->ctx->dc->res_pool->dscs[0], - &stream->sink->dsc_caps.dsc_dec_caps, - &dsc_options, - end_to_end_bw_in_kbps, - &stream->timing, - dc_link_get_highest_encoding_format(stream->link), - &stream->timing.dsc_cfg)) { - stream->timing.flags.DSC = 1; - DRM_DEBUG_DRIVER("end-to-end bandwidth require dsc and dsc config found\n"); - } else { - DRM_DEBUG_DRIVER("end-to-end bandwidth require dsc but dsc config not found\n"); - return DC_FAIL_BANDWIDTH_VALIDATE; + /*Get virtual channel bandwidth between source and the link before the last link*/ + if (aconnector->mst_output_port->parent->port_parent) + immediate_upstream_port = aconnector->mst_output_port->parent->port_parent; + + if (immediate_upstream_port) { + virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn); + virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps); + if (bw_range.min_kbps > virtual_channel_bw_in_kbps) { + DRM_DEBUG_DRIVER("DSC decode at last link. Max dsc compression can't fit into MST available bw\n"); + return DC_FAIL_BANDWIDTH_VALIDATE; + } } } - } else { - /* Check if mode could be supported within max slot - * number of current mst link and full_pbn of mst links. - */ - int pbn_div, slot_num, max_slot_num; - enum dc_link_encoding_format link_encoding; - uint32_t stream_kbps = - dc_bandwidth_in_kbps_from_timing(&stream->timing, - dc_link_get_highest_encoding_format(stream->link)); - - pbn = kbps_to_peak_pbn(stream_kbps); - pbn_div = dm_mst_get_pbn_divider(stream->link); - slot_num = DIV_ROUND_UP(pbn, pbn_div); - - link_encoding = dc_link_get_highest_encoding_format(stream->link); - if (link_encoding == DC_LINK_ENCODING_DP_8b_10b) - max_slot_num = 63; - else if (link_encoding == DC_LINK_ENCODING_DP_128b_132b) - max_slot_num = 64; - else { - DRM_DEBUG_DRIVER("Invalid link encoding format\n"); - return DC_FAIL_BANDWIDTH_VALIDATE; - }
- if (slot_num > max_slot_num || - pbn > aconnector->mst_output_port->full_pbn) { - DRM_DEBUG_DRIVER("Mode can not be supported within mst links!"); + /*Confirm if we can obtain dsc config*/ + dc_dsc_get_default_config_option(stream->link->dc, &dsc_options); + dsc_options.max_target_bpp_limit_override_x16 = aconnector->base.display_info.max_dsc_bpp * 16; + if (dc_dsc_compute_config(stream->sink->ctx->dc->res_pool->dscs[0], + &stream->sink->dsc_caps.dsc_dec_caps, + &dsc_options, + end_to_end_bw_in_kbps, + &stream->timing, + dc_link_get_highest_encoding_format(stream->link), + &stream->timing.dsc_cfg)) { + stream->timing.flags.DSC = 1; + DRM_DEBUG_DRIVER("Require dsc and dsc config found\n"); + } else { + DRM_DEBUG_DRIVER("Require dsc but can't find appropriate dsc config\n"); return DC_FAIL_BANDWIDTH_VALIDATE; } - }
- /* check is mst dsc output bandwidth branch_overall_throughput_0_mps */ - switch (stream->timing.pixel_encoding) { - case PIXEL_ENCODING_RGB: - case PIXEL_ENCODING_YCBCR444: - branch_max_throughput_mps = - aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_0_mps; - break; - case PIXEL_ENCODING_YCBCR422: - case PIXEL_ENCODING_YCBCR420: - branch_max_throughput_mps = - aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_1_mps; - break; - default: - break; - } + /* check is mst dsc output bandwidth branch_overall_throughput_0_mps */ + switch (stream->timing.pixel_encoding) { + case PIXEL_ENCODING_RGB: + case PIXEL_ENCODING_YCBCR444: + branch_max_throughput_mps = + aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_0_mps; + break; + case PIXEL_ENCODING_YCBCR422: + case PIXEL_ENCODING_YCBCR420: + branch_max_throughput_mps = + aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_1_mps; + break; + default: + break; + }
- if (branch_max_throughput_mps != 0 && - ((stream->timing.pix_clk_100hz / 10) > branch_max_throughput_mps * 1000)) + if (branch_max_throughput_mps != 0 && + ((stream->timing.pix_clk_100hz / 10) > branch_max_throughput_mps * 1000)) { + DRM_DEBUG_DRIVER("DSC is required but max throughput mps fails"); return DC_FAIL_BANDWIDTH_VALIDATE; - + } + } else { + DRM_DEBUG_DRIVER("DSC is required but can't find common dsc config."); + return DC_FAIL_BANDWIDTH_VALIDATE; + } +#endif return DC_OK; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 15819416a2f3..8ed599324693 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -2267,6 +2267,10 @@ void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
otg_master = resource_get_otg_master_for_stream( &state->res_ctx, state->streams[stream_idx]); + + if (!otg_master) + continue; + resource_log_pipe_for_stream(dc, state, otg_master, stream_idx); } if (state->phantom_stream_count > 0) { @@ -2508,6 +2512,17 @@ static void remove_hpo_dp_link_enc_from_ctx(struct resource_context *res_ctx, } }
+static int get_num_of_free_pipes(const struct resource_pool *pool, const struct dc_state *context) +{ + int i; + int count = 0; + + for (i = 0; i < pool->pipe_count; i++) + if (resource_is_pipe_type(&context->res_ctx.pipe_ctx[i], FREE_PIPE)) + count++; + return count; +} + enum dc_status resource_add_otg_master_for_stream_output(struct dc_state *new_ctx, const struct resource_pool *pool, struct dc_stream_state *stream) @@ -2641,37 +2656,33 @@ static bool acquire_secondary_dpp_pipes_and_add_plane( struct dc_state *cur_ctx, struct resource_pool *pool) { - struct pipe_ctx *opp_head_pipe, *sec_pipe, *tail_pipe; + struct pipe_ctx *sec_pipe, *tail_pipe; + struct pipe_ctx *opp_heads[MAX_PIPES]; + int opp_head_count; + int i;
if (!pool->funcs->acquire_free_pipe_as_secondary_dpp_pipe) { ASSERT(0); return false; }
- opp_head_pipe = otg_master_pipe; - while (opp_head_pipe) { + opp_head_count = resource_get_opp_heads_for_otg_master(otg_master_pipe, + &new_ctx->res_ctx, opp_heads); + if (get_num_of_free_pipes(pool, new_ctx) < opp_head_count) + /* not enough free pipes */ + return false; + + for (i = 0; i < opp_head_count; i++) { sec_pipe = pool->funcs->acquire_free_pipe_as_secondary_dpp_pipe( cur_ctx, new_ctx, pool, - opp_head_pipe); - if (!sec_pipe) { - /* try tearing down MPCC combine */ - int pipe_idx = acquire_first_split_pipe( - &new_ctx->res_ctx, pool, - otg_master_pipe->stream); - - if (pipe_idx >= 0) - sec_pipe = &new_ctx->res_ctx.pipe_ctx[pipe_idx]; - } - - if (!sec_pipe) - return false; - + opp_heads[i]); + ASSERT(sec_pipe); sec_pipe->plane_state = plane_state;
/* establish pipe relationship */ - tail_pipe = get_tail_pipe(opp_head_pipe); + tail_pipe = get_tail_pipe(opp_heads[i]); tail_pipe->bottom_pipe = sec_pipe; sec_pipe->top_pipe = tail_pipe; sec_pipe->bottom_pipe = NULL; @@ -2682,8 +2693,6 @@ static bool acquire_secondary_dpp_pipes_and_add_plane( } else { sec_pipe->prev_odm_pipe = NULL; } - - opp_head_pipe = opp_head_pipe->next_odm_pipe; } return true; } @@ -2696,6 +2705,7 @@ bool resource_append_dpp_pipes_for_plane_composition( struct dc_plane_state *plane_state) { bool success; + if (otg_master_pipe->plane_state == NULL) success = add_plane_to_opp_head_pipes(otg_master_pipe, plane_state, new_ctx); @@ -2703,10 +2713,15 @@ bool resource_append_dpp_pipes_for_plane_composition( success = acquire_secondary_dpp_pipes_and_add_plane( otg_master_pipe, plane_state, new_ctx, cur_ctx, pool); - if (success) + if (success) { /* when appending a plane mpc slice count changes from 0 to 1 */ success = update_pipe_params_after_mpc_slice_count_change( plane_state, new_ctx, pool); + if (!success) + resource_remove_dpp_pipes_for_plane_composition(new_ctx, + pool, plane_state); + } + return success; }
@@ -2716,6 +2731,7 @@ void resource_remove_dpp_pipes_for_plane_composition( const struct dc_plane_state *plane_state) { int i; + for (i = pool->pipe_count - 1; i >= 0; i--) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
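The dc_resource.c rework replaces "acquire until it fails" with an up-front capacity check: get_num_of_free_pipes() is consulted before any pipe is taken, so the function either commits fully or declines without mutating state. The shape, runnable in miniature (pool contents invented):

#include <stdbool.h>
#include <stdio.h>

#define MAX_PIPES 6

static bool pipe_free[MAX_PIPES] = { true, false, true, true, false, true };

static int num_free_pipes(void)
{
        int i, count = 0;

        for (i = 0; i < MAX_PIPES; i++)
                if (pipe_free[i])
                        count++;
        return count;
}

static bool acquire_pipes(int needed)
{
        int i;

        if (num_free_pipes() < needed)   /* reject before touching anything */
                return false;

        for (i = 0; i < MAX_PIPES && needed > 0; i++)
                if (pipe_free[i]) {
                        pipe_free[i] = false;
                        needed--;
                }
        return true;
}

int main(void)
{
        printf("%d\n", acquire_pipes(3));   /* 1: four free, takes three */
        printf("%d\n", acquire_pipes(3));   /* 0: one left, nothing taken */
        return 0;
}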
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c index 76bb05f4d6bf..52a1cfc5feed 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c @@ -437,6 +437,19 @@ enum dc_status dc_state_remove_stream( return DC_OK; }
+static void remove_mpc_combine_for_stream(const struct dc *dc, + struct dc_state *new_ctx, + const struct dc_state *cur_ctx, + struct dc_stream_status *status) +{ + int i; + + for (i = 0; i < status->plane_count; i++) + resource_update_pipes_for_plane_with_slice_count( + new_ctx, cur_ctx, dc->res_pool, + status->plane_states[i], 1); +} + bool dc_state_add_plane( const struct dc *dc, struct dc_stream_state *stream, @@ -447,8 +460,12 @@ bool dc_state_add_plane( struct pipe_ctx *otg_master_pipe; struct dc_stream_status *stream_status = NULL; bool added = false; + int odm_slice_count; + int i;
stream_status = dc_state_get_stream_status(state, stream); + otg_master_pipe = resource_get_otg_master_for_stream( + &state->res_ctx, stream); if (stream_status == NULL) { dm_error("Existing stream not found; failed to attach surface!\n"); goto out; @@ -456,22 +473,39 @@ bool dc_state_add_plane( dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n", plane_state, MAX_SURFACE_NUM); goto out; + } else if (!otg_master_pipe) { + goto out; }
- if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm) - /* ODM combine could prevent us from supporting more planes - * we will reset ODM slice count back to 1 when all planes have - * been removed to maximize the amount of planes supported when - * new planes are added. - */ - resource_update_pipes_for_stream_with_slice_count( - state, dc->current_state, dc->res_pool, stream, 1); + added = resource_append_dpp_pipes_for_plane_composition(state, + dc->current_state, pool, otg_master_pipe, plane_state);
- otg_master_pipe = resource_get_otg_master_for_stream( - &state->res_ctx, stream); - if (otg_master_pipe) + if (!added) { + /* try to remove MPC combine to free up pipes */ + for (i = 0; i < state->stream_count; i++) + remove_mpc_combine_for_stream(dc, state, + dc->current_state, + &state->stream_status[i]); added = resource_append_dpp_pipes_for_plane_composition(state, - dc->current_state, pool, otg_master_pipe, plane_state); + dc->current_state, pool, + otg_master_pipe, plane_state); + } + + if (!added) { + /* try to decrease ODM slice count gradually to free up pipes */ + odm_slice_count = resource_get_odm_slice_count(otg_master_pipe); + for (i = odm_slice_count - 1; i > 0; i--) { + resource_update_pipes_for_stream_with_slice_count(state, + dc->current_state, dc->res_pool, stream, + i); + added = resource_append_dpp_pipes_for_plane_composition( + state, + dc->current_state, pool, + otg_master_pipe, plane_state); + if (added) + break; + } + }
if (added) { stream_status->plane_states[stream_status->plane_count] = @@ -531,15 +565,6 @@ bool dc_state_remove_plane(
stream_status->plane_states[stream_status->plane_count] = NULL;
- if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm) - /* ODM combine could prevent us from supporting more planes - * we will reset ODM slice count back to 1 when all planes have - * been removed to maximize the amount of planes supported when - * new planes are added. - */ - resource_update_pipes_for_stream_with_slice_count( - state, dc->current_state, dc->res_pool, stream, 1); - return true; }
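dc_state_add_plane() now degrades gracefully: try the plain attach, then tear down MPC combine and retry, then walk the ODM slice count down one step at a time, retrying after each. A compressed sketch of that staged fallback (the resource model is invented; the numbers only make the retries observable):

#include <stdbool.h>
#include <stdio.h>

static int free_pipes = 0;            /* invented resource counter */

static bool try_add_plane(void) { return free_pipes >= 2; }
static void remove_mpc_combine(void) { free_pipes += 1; }
static void set_odm_slice_count(int n) { free_pipes = 4 - n; }

static bool add_plane(int odm_slice_count)
{
        bool added = try_add_plane();
        int i;

        if (!added) {                         /* 1st fallback: undo MPC combine */
                remove_mpc_combine();
                added = try_add_plane();
        }
        for (i = odm_slice_count - 1; !added && i > 0; i--) {
                set_odm_slice_count(i);       /* 2nd fallback: fewer ODM slices */
                added = try_add_plane();
        }
        return added;
}

int main(void)
{
        printf("%d\n", add_plane(4));   /* 1: succeeds once ODM drops to 2 slices */
        return 0;
}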
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c index 4f559a025cf0..09cf54586fd5 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c @@ -84,7 +84,7 @@ static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait,
cmd.replay_enable.header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_enable_data);
- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
/* Below loops 1000 x 500us = 500 ms. * Exit REPLAY may need to wait 1-2 frames to power up. Timeout after at @@ -127,7 +127,7 @@ static void dmub_replay_set_power_opt(struct dmub_replay *dmub, unsigned int pow cmd.replay_set_power_opt.replay_set_power_opt_data.power_opt = power_opt; cmd.replay_set_power_opt.replay_set_power_opt_data.panel_inst = panel_inst;
- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); }
/* @@ -209,8 +209,7 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub, else copy_settings_data->flags.bitfields.force_wakeup_by_tps3 = 0;
- - dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true; } @@ -231,7 +230,7 @@ static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub, cmd.replay_set_coasting_vtotal.header.payload_bytes = sizeof(struct dmub_cmd_replay_set_coasting_vtotal_data); cmd.replay_set_coasting_vtotal.replay_set_coasting_vtotal_data.coasting_vtotal = coasting_vtotal;
- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); }
/* diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c index 0c4aef8ffe2c..3306684e805a 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c @@ -288,6 +288,7 @@ static void dcn10_log_color_state(struct dc *dc, { struct dc_context *dc_ctx = dc->ctx; struct resource_pool *pool = dc->res_pool; + bool is_gamut_remap_available = false; int i;
DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode" @@ -300,16 +301,15 @@ static void dcn10_log_color_state(struct dc *dc, struct dcn_dpp_state s = {0};
dpp->funcs->dpp_read_state(dpp, &s); - dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap); + if (dpp->funcs->dpp_get_gamut_remap) { + dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap); + is_gamut_remap_available = true; + }
if (!s.is_enabled) continue;
- DTN_INFO("[%2d]: %11xh %11s %9s %9s" - " %12s " - "%010lld %010lld %010lld %010lld " - "%010lld %010lld %010lld %010lld " - "%010lld %010lld %010lld %010lld", + DTN_INFO("[%2d]: %11xh %11s %9s %9s", dpp->inst, s.igam_input_format, (s.igam_lut_mode == 0) ? "BypassFixed" : @@ -328,22 +328,27 @@ static void dcn10_log_color_state(struct dc *dc, ((s.rgam_lut_mode == 2) ? "Ycc" : ((s.rgam_lut_mode == 3) ? "RAM" : ((s.rgam_lut_mode == 4) ? "RAM" : - "Unknown")))), - (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" : - ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" : - "SW"), - s.gamut_remap.temperature_matrix[0].value, - s.gamut_remap.temperature_matrix[1].value, - s.gamut_remap.temperature_matrix[2].value, - s.gamut_remap.temperature_matrix[3].value, - s.gamut_remap.temperature_matrix[4].value, - s.gamut_remap.temperature_matrix[5].value, - s.gamut_remap.temperature_matrix[6].value, - s.gamut_remap.temperature_matrix[7].value, - s.gamut_remap.temperature_matrix[8].value, - s.gamut_remap.temperature_matrix[9].value, - s.gamut_remap.temperature_matrix[10].value, - s.gamut_remap.temperature_matrix[11].value); + "Unknown"))))); + if (is_gamut_remap_available) + DTN_INFO(" %12s " + "%010lld %010lld %010lld %010lld " + "%010lld %010lld %010lld %010lld " + "%010lld %010lld %010lld %010lld", + (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" : + ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" : "SW"), + s.gamut_remap.temperature_matrix[0].value, + s.gamut_remap.temperature_matrix[1].value, + s.gamut_remap.temperature_matrix[2].value, + s.gamut_remap.temperature_matrix[3].value, + s.gamut_remap.temperature_matrix[4].value, + s.gamut_remap.temperature_matrix[5].value, + s.gamut_remap.temperature_matrix[6].value, + s.gamut_remap.temperature_matrix[7].value, + s.gamut_remap.temperature_matrix[8].value, + s.gamut_remap.temperature_matrix[9].value, + s.gamut_remap.temperature_matrix[10].value, + s.gamut_remap.temperature_matrix[11].value); + DTN_INFO("\n"); } DTN_INFO("\n"); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c index ed9141a67db3..5b09d95cc5b8 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c @@ -919,6 +919,9 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) stream = dc->current_state->streams[0]; plane = (stream ? dc->current_state->stream_status[0].plane_states[0] : NULL);
+ if (!stream || !plane) + return false; + if (stream && plane) { cursor_cache_enable = stream->cursor_position.enable && plane->address.grph.cursor_cache_addr.quad_part; diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c index 3e6c7be7e278..5302d2c9c760 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c @@ -165,7 +165,12 @@ static void set_hpo_fixed_vs_pe_retimer_dp_link_test_pattern(struct dc_link *lin link_res->hpo_dp_link_enc->funcs->set_link_test_pattern( link_res->hpo_dp_link_enc, tp_params); } + link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN); + + // Give retimer extra time to lock before updating DP_TRAINING_PATTERN_SET to TPS1 + if (tp_params->dp_phy_pattern == DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE) + msleep(30); }
static void set_hpo_fixed_vs_pe_retimer_dp_lane_settings(struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index b53ad18dbfbc..ec9ff5f8bdc5 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -2313,8 +2313,6 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
dc->hwss.disable_audio_stream(pipe_ctx);
- edp_set_panel_assr(link, pipe_ctx, &panel_mode_dp, false); - update_psp_stream_config(pipe_ctx, true); dc->hwss.blank_stream(pipe_ctx);
@@ -2368,6 +2366,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx) dc->hwss.disable_stream(pipe_ctx); disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal); } + edp_set_panel_assr(link, pipe_ctx, &panel_mode_dp, false);
if (pipe_ctx->stream->timing.flags.DSC) { if (dc_is_dp_signal(pipe_ctx->stream->signal)) diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c index 0fcf0b8530ac..564246983f37 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c @@ -454,7 +454,8 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link, * If we got sink count changed it means * Downstream port status changed, * then DM should call DC to do the detection. - * NOTE: Do not handle link loss on eDP since it is internal link*/ + * NOTE: Do not handle link loss on eDP since it is internal link + */ if ((link->connector_signal != SIGNAL_TYPE_EDP) && dp_parse_link_loss_status( link, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c index 0a939437e19f..6b380e037e3f 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c @@ -2193,10 +2193,11 @@ bool dcn20_get_dcc_compression_cap(const struct dc *dc, const struct dc_dcc_surface_param *input, struct dc_surface_dcc_cap *output) { - return dc->res_pool->hubbub->funcs->get_dcc_compression_cap( - dc->res_pool->hubbub, - input, - output); + if (dc->res_pool->hubbub->funcs->get_dcc_compression_cap) + return dc->res_pool->hubbub->funcs->get_dcc_compression_cap( + dc->res_pool->hubbub, input, output); + + return false; }
static void dcn20_destroy_resource_pool(struct resource_pool **pool) diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index 5fb21a0508cd..f531ce1d2b1d 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -929,7 +929,7 @@ static int pp_dpm_switch_power_profile(void *handle, enum PP_SMC_POWER_PROFILE type, bool en) { struct pp_hwmgr *hwmgr = handle; - long workload; + long workload[1]; uint32_t index;
if (!hwmgr || !hwmgr->pm_en) @@ -947,12 +947,12 @@ static int pp_dpm_switch_power_profile(void *handle, hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]); index = fls(hwmgr->workload_mask); index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0; - workload = hwmgr->workload_setting[index]; + workload[0] = hwmgr->workload_setting[index]; } else { hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]); index = fls(hwmgr->workload_mask); index = index <= Workload_Policy_Max ? index - 1 : 0; - workload = hwmgr->workload_setting[index]; + workload[0] = hwmgr->workload_setting[index]; }
if (type == PP_SMC_POWER_PROFILE_COMPUTE && @@ -962,7 +962,7 @@ static int pp_dpm_switch_power_profile(void *handle, }
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) - hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0); + hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0);
return 0; } diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c index 1d829402cd2e..f4bd8e9357e2 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c @@ -269,7 +269,7 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_set struct pp_power_state *new_ps) { uint32_t index; - long workload; + long workload[1];
if (hwmgr->not_vf) { if (!skip_display_settings) @@ -294,10 +294,10 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_set if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { index = fls(hwmgr->workload_mask); index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0; - workload = hwmgr->workload_setting[index]; + workload[0] = hwmgr->workload_setting[index];
- if (hwmgr->power_profile_mode != workload && hwmgr->hwmgr_func->set_power_profile_mode) - hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0); + if (hwmgr->power_profile_mode != workload[0] && hwmgr->hwmgr_func->set_power_profile_mode) + hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0); }
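The `long workload` to `long workload[1]` change repeated across powerplay and swsmu is about the callee's contract, not the value: set_power_profile_mode() takes a `long *input` that it may index as an array, and newer compilers warn when the argument is the address of a lone scalar. A sketch of the difference (set_profile() is a hypothetical stand-in):

#include <stdio.h>

/* the callee indexes its argument like an array */
static void set_profile(long *input, unsigned int size)
{
        printf("profile %ld\n", input[size]);
}

int main(void)
{
        long workload[1];           /* a real one-element array ... */

        workload[0] = 3;
        set_profile(workload, 0);   /* ... keeps the bounds checkers happy */
        return 0;
}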
return 0; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index 1fcd4451001f..f1c369945ac5 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -2957,6 +2957,7 @@ static int smu7_update_edc_leakage_table(struct pp_hwmgr *hwmgr)
static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev = hwmgr->adev; struct smu7_hwmgr *data; int result = 0;
@@ -2993,40 +2994,37 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) /* Initalize Dynamic State Adjustment Rule Settings */ result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
- if (0 == result) { - struct amdgpu_device *adev = hwmgr->adev; + if (result) + goto fail;
- data->is_tlu_enabled = false; + data->is_tlu_enabled = false;
- hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = + hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = SMU7_MAX_HARDWARE_POWERLEVELS; - hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; - hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; + hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; + hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
- data->pcie_gen_cap = adev->pm.pcie_gen_mask; - if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) - data->pcie_spc_cap = 20; - else - data->pcie_spc_cap = 16; - data->pcie_lane_cap = adev->pm.pcie_mlw_mask; - - hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */ -/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */ - hwmgr->platform_descriptor.clockStep.engineClock = 500; - hwmgr->platform_descriptor.clockStep.memoryClock = 500; - smu7_thermal_parameter_init(hwmgr); - } else { - /* Ignore return value in here, we are cleaning up a mess. */ - smu7_hwmgr_backend_fini(hwmgr); - } + data->pcie_gen_cap = adev->pm.pcie_gen_mask; + if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) + data->pcie_spc_cap = 20; + else + data->pcie_spc_cap = 16; + data->pcie_lane_cap = adev->pm.pcie_mlw_mask; + + hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */ + /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */ + hwmgr->platform_descriptor.clockStep.engineClock = 500; + hwmgr->platform_descriptor.clockStep.memoryClock = 500; + smu7_thermal_parameter_init(hwmgr);
result = smu7_update_edc_leakage_table(hwmgr); - if (result) { - smu7_hwmgr_backend_fini(hwmgr); - return result; - } + if (result) + goto fail;
return 0; +fail: + smu7_hwmgr_backend_fini(hwmgr); + return result; }
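The backend-init rewrite above also flattens the nesting into the kernel's usual single-exit form: every failing step jumps to one fail: label that runs the teardown. Reduced to a standalone sketch with made-up steps:

#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return -5; }    /* pretend this step fails */
static void backend_fini(void) { puts("torn down"); }

static int backend_init(void)
{
        int result;

        result = step_a();
        if (result)
                goto fail;

        result = step_b();
        if (result)
                goto fail;

        return 0;
fail:
        backend_fini();     /* one cleanup path, whichever step failed */
        return result;
}

int main(void)
{
        printf("%d\n", backend_init());   /* prints "torn down", then -5 */
        return 0;
}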
static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr) @@ -3316,8 +3314,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, const struct pp_power_state *current_ps) { struct amdgpu_device *adev = hwmgr->adev; - struct smu7_power_state *smu7_ps = - cast_phw_smu7_power_state(&request_ps->hardware); + struct smu7_power_state *smu7_ps; uint32_t sclk; uint32_t mclk; struct PP_Clocks minimum_clocks = {0}; @@ -3334,6 +3331,10 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, uint32_t latency; bool latency_allowed = false;
+ smu7_ps = cast_phw_smu7_power_state(&request_ps->hardware); + if (!smu7_ps) + return -EINVAL; + data->battery_state = (PP_StateUILabel_Battery == request_ps->classification.ui_label); data->mclk_ignore_signal = false; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c index b015a601b385..eb744401e056 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c @@ -1065,16 +1065,18 @@ static int smu8_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, struct pp_power_state *prequest_ps, const struct pp_power_state *pcurrent_ps) { - struct smu8_power_state *smu8_ps = - cast_smu8_power_state(&prequest_ps->hardware); - - const struct smu8_power_state *smu8_current_ps = - cast_const_smu8_power_state(&pcurrent_ps->hardware); - + struct smu8_power_state *smu8_ps; + const struct smu8_power_state *smu8_current_ps; struct smu8_hwmgr *data = hwmgr->backend; struct PP_Clocks clocks = {0, 0, 0, 0}; bool force_high;
+ smu8_ps = cast_smu8_power_state(&prequest_ps->hardware); + smu8_current_ps = cast_const_smu8_power_state(&pcurrent_ps->hardware); + + if (!smu8_ps || !smu8_current_ps) + return -EINVAL; + smu8_ps->need_dfs_bypass = true;
data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c index 9f5bd998c6bf..f4acdb226741 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c @@ -3259,8 +3259,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, const struct pp_power_state *current_ps) { struct amdgpu_device *adev = hwmgr->adev; - struct vega10_power_state *vega10_ps = - cast_phw_vega10_power_state(&request_ps->hardware); + struct vega10_power_state *vega10_ps; uint32_t sclk; uint32_t mclk; struct PP_Clocks minimum_clocks = {0}; @@ -3278,6 +3277,10 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, uint32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; uint32_t latency;
+ vega10_ps = cast_phw_vega10_power_state(&request_ps->hardware); + if (!vega10_ps) + return -EINVAL; + data->battery_state = (PP_StateUILabel_Battery == request_ps->classification.ui_label);
@@ -3415,13 +3418,17 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co const struct vega10_power_state *vega10_ps = cast_const_phw_vega10_power_state(states->pnew_state); struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table); - uint32_t sclk = vega10_ps->performance_levels - [vega10_ps->performance_level_count - 1].gfx_clock; struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table); - uint32_t mclk = vega10_ps->performance_levels - [vega10_ps->performance_level_count - 1].mem_clock; + uint32_t sclk, mclk; uint32_t i;
+ if (vega10_ps == NULL) + return -EINVAL; + sclk = vega10_ps->performance_levels + [vega10_ps->performance_level_count - 1].gfx_clock; + mclk = vega10_ps->performance_levels + [vega10_ps->performance_level_count - 1].mem_clock; + for (i = 0; i < sclk_table->count; i++) { if (sclk == sclk_table->dpm_levels[i].value) break; @@ -3728,6 +3735,9 @@ static int vega10_generate_dpm_level_enable_mask( cast_const_phw_vega10_power_state(states->pnew_state); int i;
+ if (vega10_ps == NULL) + return -EINVAL; + PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps), "Attempt to Trim DPM States Failed!", return -1); @@ -4995,6 +5005,8 @@ static int vega10_check_states_equal(struct pp_hwmgr *hwmgr,
vega10_psa = cast_const_phw_vega10_power_state(pstate1); vega10_psb = cast_const_phw_vega10_power_state(pstate2); + if (vega10_psa == NULL || vega10_psb == NULL) + return -EINVAL;
/* If the two states don't even have the same number of performance levels * they cannot be the same state. @@ -5128,6 +5140,8 @@ static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) return -EINVAL;
vega10_ps = cast_phw_vega10_power_state(&ps->hardware); + if (vega10_ps == NULL) + return -EINVAL;
vega10_ps->performance_levels [vega10_ps->performance_level_count - 1].gfx_clock = @@ -5179,6 +5193,8 @@ static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) return -EINVAL;
vega10_ps = cast_phw_vega10_power_state(&ps->hardware); + if (vega10_ps == NULL) + return -EINVAL;
vega10_ps->performance_levels [vega10_ps->performance_level_count - 1].mem_clock = @@ -5420,6 +5436,9 @@ static void vega10_odn_update_power_state(struct pp_hwmgr *hwmgr) return;
vega10_ps = cast_phw_vega10_power_state(&ps->hardware); + if (vega10_ps == NULL) + return; + max_level = vega10_ps->performance_level_count - 1;
if (vega10_ps->performance_levels[max_level].gfx_clock != @@ -5442,6 +5461,9 @@ static void vega10_odn_update_power_state(struct pp_hwmgr *hwmgr)
ps = (struct pp_power_state *)((unsigned long)(hwmgr->ps) + hwmgr->ps_size * (hwmgr->num_ps - 1)); vega10_ps = cast_phw_vega10_power_state(&ps->hardware); + if (vega10_ps == NULL) + return; + max_level = vega10_ps->performance_level_count - 1;
if (vega10_ps->performance_levels[max_level].gfx_clock != @@ -5632,6 +5654,8 @@ static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_ return -EINVAL;
vega10_ps = cast_const_phw_vega10_power_state(state); + if (vega10_ps == NULL) + return -EINVAL;
i = index > vega10_ps->performance_level_count - 1 ? vega10_ps->performance_level_count - 1 : index; diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index e1796ecf9c05..06409133b09b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -2220,7 +2220,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu, { int ret = 0; int index = 0; - long workload; + long workload[1]; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
if (!skip_display_settings) { @@ -2260,10 +2260,10 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu, smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { index = fls(smu->workload_mask); index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; - workload = smu->workload_setting[index]; + workload[0] = smu->workload_setting[index];
- if (smu->power_profile_mode != workload) - smu_bump_power_profile_mode(smu, &workload, 0); + if (smu->power_profile_mode != workload[0]) + smu_bump_power_profile_mode(smu, workload, 0); }
return ret; @@ -2313,7 +2313,7 @@ static int smu_switch_power_profile(void *handle, { struct smu_context *smu = handle; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); - long workload; + long workload[1]; uint32_t index;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) @@ -2326,17 +2326,17 @@ static int smu_switch_power_profile(void *handle, smu->workload_mask &= ~(1 << smu->workload_prority[type]); index = fls(smu->workload_mask); index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; - workload = smu->workload_setting[index]; + workload[0] = smu->workload_setting[index]; } else { smu->workload_mask |= (1 << smu->workload_prority[type]); index = fls(smu->workload_mask); index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; - workload = smu->workload_setting[index]; + workload[0] = smu->workload_setting[index]; }
if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) - smu_bump_power_profile_mode(smu, &workload, 0); + smu_bump_power_profile_mode(smu, workload, 0);
return 0; } diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c index 6a4f20fccf84..7b0bc9704eac 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c @@ -1027,7 +1027,6 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp, u32 status_reg; u8 *buffer = msg->buffer; unsigned int i; - int num_transferred = 0; int ret;
/* Buffer size of AUX CH is 16 bytes */ @@ -1079,7 +1078,6 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp, reg = buffer[i]; writel(reg, dp->reg_base + ANALOGIX_DP_BUF_DATA_0 + 4 * i); - num_transferred++; } }
@@ -1127,7 +1125,6 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp, reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0 + 4 * i); buffer[i] = (unsigned char)reg; - num_transferred++; } }
@@ -1144,7 +1141,7 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp, (msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_READ) msg->reply = DP_AUX_NATIVE_REPLY_ACK;
- return num_transferred > 0 ? num_transferred : -EBUSY; + return msg->size;
aux_error: /* if aux err happen, reset aux */ diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index 68831f4e502a..fc2ceae61db2 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -4069,6 +4069,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { const struct drm_dp_connection_status_notify *conn_stat = &up_req->msg.u.conn_stat; + bool handle_csn;
drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", conn_stat->port_number, @@ -4077,6 +4078,16 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) conn_stat->message_capability_status, conn_stat->input_port, conn_stat->peer_device_type); + + mutex_lock(&mgr->probe_lock); + handle_csn = mgr->mst_primary->link_address_sent; + mutex_unlock(&mgr->probe_lock); + + if (!handle_csn) { + drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it."); + kfree(up_req); + goto out; + } } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { const struct drm_dp_resource_status_notify *res_stat = &up_req->msg.u.resource_stat; diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index 02b1235c6d61..106292d6ed26 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -1067,23 +1067,16 @@ int drm_atomic_set_property(struct drm_atomic_state *state, }
if (async_flip && - prop != config->prop_fb_id && - prop != config->prop_in_fence_fd && - prop != config->prop_fb_damage_clips) { + (plane_state->plane->type != DRM_PLANE_TYPE_PRIMARY || + (prop != config->prop_fb_id && + prop != config->prop_in_fence_fd && + prop != config->prop_fb_damage_clips))) { ret = drm_atomic_plane_get_property(plane, plane_state, prop, &old_val); ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop); break; }
- if (async_flip && plane_state->plane->type != DRM_PLANE_TYPE_PRIMARY) { - drm_dbg_atomic(prop->dev, - "[OBJECT:%d] Only primary planes can be changed during async flip\n", - obj->id); - ret = -EINVAL; - break; - } - ret = drm_atomic_plane_set_property(plane, plane_state, file_priv, prop, prop_value); diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index 31af5cf37a09..cee5eafbfb81 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -880,6 +880,11 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
kfree(modeset->mode); modeset->mode = drm_mode_duplicate(dev, mode); + if (!modeset->mode) { + ret = -ENOMEM; + break; + } + drm_connector_get(connector); modeset->connectors[modeset->num_connectors++] = connector; modeset->x = offset->x; diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c index 071668bfe5d1..6c3333136737 100644 --- a/drivers/gpu/drm/i915/display/intel_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_backlight.c @@ -1449,6 +1449,9 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
static int cnp_num_backlight_controllers(struct drm_i915_private *i915) { + if (INTEL_PCH_TYPE(i915) >= PCH_MTL) + return 2; + if (INTEL_PCH_TYPE(i915) >= PCH_DG1) return 1;
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c index 0ccbf9a85914..eca07436d1bc 100644 --- a/drivers/gpu/drm/i915/display/intel_pps.c +++ b/drivers/gpu/drm/i915/display/intel_pps.c @@ -351,6 +351,9 @@ static int intel_num_pps(struct drm_i915_private *i915) if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) return 2;
+ if (INTEL_PCH_TYPE(i915) >= PCH_MTL) + return 2; + if (INTEL_PCH_TYPE(i915) >= PCH_DG1) return 1;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index a2195e28b625..cac6d4184506 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -290,6 +290,41 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf) return i915_error_to_vmf_fault(err); }
+static void set_address_limits(struct vm_area_struct *area, + struct i915_vma *vma, + unsigned long obj_offset, + unsigned long *start_vaddr, + unsigned long *end_vaddr) +{ + unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */ + long start, end; /* memory boundaries */ + + /* + * Let's move into the ">> PAGE_SHIFT" + * domain to be sure not to lose bits + */ + vm_start = area->vm_start >> PAGE_SHIFT; + vm_end = area->vm_end >> PAGE_SHIFT; + vma_size = vma->size >> PAGE_SHIFT; + + /* + * Calculate the memory boundaries by considering the offset + * provided by the user during memory mapping and the offset + * provided for the partial mapping. + */ + start = vm_start; + start -= obj_offset; + start += vma->gtt_view.partial.offset; + end = start + vma_size; + + start = max_t(long, start, vm_start); + end = min_t(long, end, vm_end); + + /* Let's move back into the "<< PAGE_SHIFT" domain */ + *start_vaddr = (unsigned long)start << PAGE_SHIFT; + *end_vaddr = (unsigned long)end << PAGE_SHIFT; +} + static vm_fault_t vm_fault_gtt(struct vm_fault *vmf) { #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT) @@ -302,14 +337,18 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf) struct i915_ggtt *ggtt = to_gt(i915)->ggtt; bool write = area->vm_flags & VM_WRITE; struct i915_gem_ww_ctx ww; + unsigned long obj_offset; + unsigned long start, end; /* memory boundaries */ intel_wakeref_t wakeref; struct i915_vma *vma; pgoff_t page_offset; + unsigned long pfn; int srcu; int ret;
- /* We don't use vmf->pgoff since that has the fake offset */ + obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node); page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT; + page_offset += obj_offset;
trace_i915_gem_object_fault(obj, page_offset, true, write);
@@ -402,12 +441,14 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf) if (ret) goto err_unpin;
+ set_address_limits(area, vma, obj_offset, &start, &end); + + pfn = (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT; + pfn += (start - area->vm_start) >> PAGE_SHIFT; + pfn += obj_offset - vma->gtt_view.partial.offset; + /* Finally, remap it using the new GTT offset */ - ret = remap_io_mapping(area, - area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT), - (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT, - min_t(u64, vma->size, area->vm_end - area->vm_start), - &ggtt->iomap); + ret = remap_io_mapping(area, start, pfn, end - start, &ggtt->iomap); if (ret) goto err_fence;
@@ -1084,6 +1125,8 @@ int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma mmo = mmap_offset_attach(obj, mmap_type, NULL); if (IS_ERR(mmo)) return PTR_ERR(mmo); + + vma->vm_pgoff += drm_vma_node_start(&mmo->vma_node); }
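set_address_limits() above does its offset arithmetic in page units, where the values are small enough that the signed intermediates cannot overflow, and clamps to the VMA before shifting back to byte addresses. A userspace rehearsal of the same arithmetic (all numbers invented):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        /* everything below is in pages, i.e. the ">> PAGE_SHIFT" domain */
        long vm_start = 0x7f0000000000L >> PAGE_SHIFT;
        long vm_end = 0x7f0000400000L >> PAGE_SHIFT;   /* 1024 pages later */
        long obj_offset = 16;        /* where the mmap starts inside the object */
        long partial_offset = 8;     /* where the partial GTT view starts */
        long vma_size = 2048;        /* view is bigger than the user mapping */
        long start, end;

        start = vm_start - obj_offset + partial_offset;
        end = start + vma_size;

        if (start < vm_start)        /* clamp to what the user actually mapped */
                start = vm_start;
        if (end > vm_end)
                end = vm_end;

        printf("remap [%#lx, %#lx)\n",
               (unsigned long)start << PAGE_SHIFT,
               (unsigned long)end << PAGE_SHIFT);
        return 0;
}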
/* diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index e6f177183c0f..5c72462d1f57 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -165,7 +165,6 @@ i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj, i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] : obj->mm.region, &places[0], obj->bo_offset, obj->base.size, flags); - places[0].flags |= TTM_PL_FLAG_DESIRED;
/* Cache this on object? */ for (i = 0; i < num_allowed; ++i) { @@ -779,13 +778,16 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj, .interruptible = true, .no_wait_gpu = false, }; - int real_num_busy; + struct ttm_placement initial_placement; + struct ttm_place initial_place; int ret;
/* First try only the requested placement. No eviction. */ - real_num_busy = placement->num_placement; - placement->num_placement = 1; - ret = ttm_bo_validate(bo, placement, &ctx); + initial_placement.num_placement = 1; + memcpy(&initial_place, placement->placement, sizeof(struct ttm_place)); + initial_place.flags |= TTM_PL_FLAG_DESIRED; + initial_placement.placement = &initial_place; + ret = ttm_bo_validate(bo, &initial_placement, &ctx); if (ret) { ret = i915_ttm_err_to_gem(ret); /* @@ -800,7 +802,6 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj, * If the initial attempt fails, allow all accepted placements, * evicting if necessary. */ - placement->num_placement = real_num_busy; ret = ttm_bo_validate(bo, placement, &ctx); if (ret) return i915_ttm_err_to_gem(ret); diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c index 739c865b556f..10bce18b7c31 100644 --- a/drivers/gpu/drm/lima/lima_drv.c +++ b/drivers/gpu/drm/lima/lima_drv.c @@ -501,3 +501,4 @@ module_platform_driver(lima_platform_driver); MODULE_AUTHOR("Lima Project Developers"); MODULE_DESCRIPTION("Lima DRM Driver"); MODULE_LICENSE("GPL v2"); +MODULE_SOFTDEP("pre: governor_simpleondemand"); diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c index 423eb302be7e..4caeb68f3010 100644 --- a/drivers/gpu/drm/mgag200/mgag200_i2c.c +++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c @@ -31,6 +31,8 @@ #include <linux/i2c.h> #include <linux/pci.h>
+#include <drm/drm_managed.h> + #include "mgag200_drv.h"
static int mga_i2c_read_gpio(struct mga_device *mdev) @@ -86,7 +88,7 @@ static int mga_gpio_getscl(void *data) return (mga_i2c_read_gpio(mdev) & i2c->clock) ? 1 : 0; }
-static void mgag200_i2c_release(void *res) +static void mgag200_i2c_release(struct drm_device *dev, void *res) { struct mga_i2c_chan *i2c = res;
@@ -114,7 +116,7 @@ int mgag200_i2c_init(struct mga_device *mdev, struct mga_i2c_chan *i2c) i2c->adapter.algo_data = &i2c->bit;
i2c->bit.udelay = 10; - i2c->bit.timeout = 2; + i2c->bit.timeout = usecs_to_jiffies(2200); i2c->bit.data = i2c; i2c->bit.setsda = mga_gpio_setsda; i2c->bit.setscl = mga_gpio_setscl; @@ -125,5 +127,5 @@ int mgag200_i2c_init(struct mga_device *mdev, struct mga_i2c_chan *i2c) if (ret) return ret;
- return devm_add_action_or_reset(dev->dev, mgag200_i2c_release, i2c); + return drmm_add_action_or_reset(dev, mgag200_i2c_release, i2c); } diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h index b7f22597ee95..969a8fb0ee9e 100644 --- a/drivers/gpu/drm/radeon/pptable.h +++ b/drivers/gpu/drm/radeon/pptable.h @@ -439,7 +439,7 @@ typedef struct _StateArray{ //how many states we have UCHAR ucNumEntries;
- ATOM_PPLIB_STATE_V2 states[] __counted_by(ucNumEntries); + ATOM_PPLIB_STATE_V2 states[] /* __counted_by(ucNumEntries) */; }StateArray;
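__counted_by() tells FORTIFY_SOURCE and UBSAN that the flexible array holds exactly ucNumEntries elements; it is commented out here, presumably because on this legacy table the field does not reliably bound the array, and a false-positive trap is worse than no check. Where the annotation does hold, it looks like this (illustrative struct; the attribute needs a recent GCC or Clang, hence the guard):

#include <stdlib.h>

#if defined(__has_attribute)
# if __has_attribute(counted_by)
#  define COUNTED_BY(m) __attribute__((counted_by(m)))
# endif
#endif
#ifndef COUNTED_BY
# define COUNTED_BY(m)   /* older compilers: annotation compiles away */
#endif

struct state_array {
        unsigned char num_entries;
        int states[] COUNTED_BY(num_entries);
};

int main(void)
{
        struct state_array *a = malloc(sizeof(*a) + 4 * sizeof(int));

        if (!a)
                return 1;
        a->num_entries = 4;   /* must match the allocation ... */
        a->states[3] = 42;    /* ... so this access is provably in bounds */
        free(a);
        return 0;
}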
diff --git a/drivers/gpu/drm/tests/drm_gem_shmem_test.c b/drivers/gpu/drm/tests/drm_gem_shmem_test.c index 91202e40cde9..60c652782761 100644 --- a/drivers/gpu/drm/tests/drm_gem_shmem_test.c +++ b/drivers/gpu/drm/tests/drm_gem_shmem_test.c @@ -102,6 +102,17 @@ static void drm_gem_shmem_test_obj_create_private(struct kunit *test)
sg_init_one(sgt->sgl, buf, TEST_SIZE);
+ /* + * Set the DMA mask to 64 bits and map the sgtables, + * otherwise drm_gem_shmem_free will cause a warning + * on debug kernels. + */ + ret = dma_set_mask(drm_dev->dev, DMA_BIT_MASK(64)); + KUNIT_ASSERT_EQ(test, ret, 0); + + ret = dma_map_sgtable(drm_dev->dev, sgt, DMA_BIDIRECTIONAL, 0); + KUNIT_ASSERT_EQ(test, ret, 0); + /* Init a mock DMA-BUF */ buf_mock.size = TEST_SIZE; attach_mock.dmabuf = &buf_mock; diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h index af71b87d8030..03c6d4d50a83 100644 --- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h @@ -44,9 +44,10 @@ #define GSCCS_RING_BASE 0x11a000
#define RING_TAIL(base) XE_REG((base) + 0x30) +#define TAIL_ADDR REG_GENMASK(20, 3)
#define RING_HEAD(base) XE_REG((base) + 0x34) -#define HEAD_ADDR 0x001FFFFC +#define HEAD_ADDR REG_GENMASK(20, 2)
#define RING_START(base) XE_REG((base) + 0x38)
@@ -135,7 +136,6 @@ #define RING_VALID_MASK 0x00000001 #define RING_VALID 0x00000001 #define STOP_RING REG_BIT(8) -#define TAIL_ADDR 0x001FFFF8
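The switch from literal masks to REG_GENMASK() also moves TAIL_ADDR next to the register it belongs to; the values are unchanged, since GENMASK(h, l) is simply "bits l through h inclusive". A quick check:

#include <stdio.h>

/* same bit pattern the kernel macro produces, for unsigned long */
#define GENMASK(h, l) \
        (((~0UL) << (l)) & (~0UL >> (8 * sizeof(unsigned long) - 1 - (h))))

int main(void)
{
        printf("HEAD_ADDR %#lx\n", GENMASK(20, 2));   /* 0x1ffffc, as before */
        printf("TAIL_ADDR %#lx\n", GENMASK(20, 3));   /* 0x1ffff8, as before */
        return 0;
}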
#define RING_CTX_TIMESTAMP(base) XE_REG((base) + 0x3a8) #define CSBE_DEBUG_STATUS(base) XE_REG((base) + 0x3fc) diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index e4e3658e6a13..0f42971ff0a8 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -1429,8 +1429,8 @@ static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q) !xe_sched_job_completed(job)) || xe_sched_invalidate_job(job, 2)) { trace_xe_sched_job_ban(job); - xe_sched_tdr_queue_imm(&q->guc->sched); set_exec_queue_banned(q); + xe_sched_tdr_queue_imm(&q->guc->sched); } } } diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c index 453e601ddd5e..d37f1dea9f8b 100644 --- a/drivers/gpu/drm/xe/xe_hwmon.c +++ b/drivers/gpu/drm/xe/xe_hwmon.c @@ -200,9 +200,10 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long va PKG_PWR_LIM_1_EN, 0, channel);
if (reg_val & PKG_PWR_LIM_1_EN) { + drm_warn(&gt_to_xe(hwmon->gt)->drm, "PL1 disable is not supported!\n"); ret = -EOPNOTSUPP; - goto unlock; } + goto unlock; }
/* Computation in 64-bits to avoid overflow. Round to nearest. */ diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c index 615bbc372ac6..d7bf7bc9dc14 100644 --- a/drivers/gpu/drm/xe/xe_lrc.c +++ b/drivers/gpu/drm/xe/xe_lrc.c @@ -1354,7 +1354,10 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc) if (!snapshot) return NULL;
- snapshot->context_desc = lower_32_bits(xe_lrc_ggtt_addr(lrc)); + if (lrc->bo && lrc->bo->vm) + xe_vm_get(lrc->bo->vm); + + snapshot->context_desc = xe_lrc_ggtt_addr(lrc); snapshot->head = xe_lrc_ring_head(lrc); snapshot->tail.internal = lrc->ring.tail; snapshot->tail.memory = xe_lrc_read_ctx_reg(lrc, CTX_RING_TAIL); @@ -1370,12 +1373,14 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc) void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot) { struct xe_bo *bo; + struct xe_vm *vm; struct iosys_map src;
if (!snapshot) return;
bo = snapshot->lrc_bo; + vm = bo->vm; snapshot->lrc_bo = NULL;
snapshot->lrc_snapshot = kvmalloc(snapshot->lrc_size, GFP_KERNEL); @@ -1395,6 +1400,8 @@ void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot) dma_resv_unlock(bo->ttm.base.resv); put_bo: xe_bo_put(bo); + if (vm) + xe_vm_put(vm); }
void xe_lrc_snapshot_print(struct xe_lrc_snapshot *snapshot, struct drm_printer *p) @@ -1440,7 +1447,13 @@ void xe_lrc_snapshot_free(struct xe_lrc_snapshot *snapshot) return;
kvfree(snapshot->lrc_snapshot); - if (snapshot->lrc_bo) + if (snapshot->lrc_bo) { + struct xe_vm *vm; + + vm = snapshot->lrc_bo->vm; xe_bo_put(snapshot->lrc_bo); + if (vm) + xe_vm_put(vm); + } kfree(snapshot); } diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c index 7d50c6e89d8e..5b243b7feb59 100644 --- a/drivers/gpu/drm/xe/xe_preempt_fence.c +++ b/drivers/gpu/drm/xe/xe_preempt_fence.c @@ -23,11 +23,19 @@ static void preempt_fence_work_func(struct work_struct *w) q->ops->suspend_wait(q);
dma_fence_signal(&pfence->base); - dma_fence_end_signalling(cookie); - + /* + * Opt for keeping everything in the fence critical section. This looks really strange since we + * have just signalled the fence, however the preempt fences are all signalled via a single + * global ordered wq, therefore anything that happens in this callback can easily block + * progress on the entire wq, which itself may prevent other published preempt fences from + * ever signalling. Therefore try to keep everything here in the callback in the fence + * critical section. For example, if something below grabs a scary lock like vm->lock, + * lockdep should complain since we also hold that lock whilst waiting on preempt fences to + * complete. + */ xe_vm_queue_rebind_worker(q->vm); - xe_exec_queue_put(q); + dma_fence_end_signalling(cookie); }
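For readers unfamiliar with the annotation referenced in the comment above: dma_fence_begin_signalling()/dma_fence_end_signalling() are lockdep markers for code that must be able to make progress without blocking on fence signalling. A minimal sketch of the pattern, with a hypothetical context struct and work item (not the driver's code):

#include <linux/dma-fence.h>
#include <linux/workqueue.h>

struct example_ctx {                    /* hypothetical */
        struct work_struct work;
        struct dma_fence *fence;
};

static void example_work_func(struct work_struct *w)
{
        struct example_ctx *ctx = container_of(w, struct example_ctx, work);
        bool cookie = dma_fence_begin_signalling();

        dma_fence_signal(ctx->fence);
        /* post-signal work runs on the same ordered wq, so it stays
         * inside the annotated region for lockdep coverage */

        dma_fence_end_signalling(cookie);
}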
static const char * diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c index fb44cc7521d8..10326bd1bfa3 100644 --- a/drivers/gpu/drm/xe/xe_rtp.c +++ b/drivers/gpu/drm/xe/xe_rtp.c @@ -200,7 +200,7 @@ static void rtp_mark_active(struct xe_device *xe, if (first == last) bitmap_set(ctx->active_entries, first, 1); else - bitmap_set(ctx->active_entries, first, last - first + 2); + bitmap_set(ctx->active_entries, first, last - first + 1); }
/** diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c index 65f1f1628235..2bfff998458b 100644 --- a/drivers/gpu/drm/xe/xe_sync.c +++ b/drivers/gpu/drm/xe/xe_sync.c @@ -263,7 +263,7 @@ void xe_sync_entry_cleanup(struct xe_sync_entry *sync) if (sync->fence) dma_fence_put(sync->fence); if (sync->chain_fence) - dma_fence_put(&sync->chain_fence->base); + dma_fence_chain_free(sync->chain_fence); if (sync->ufence) user_fence_put(sync->ufence); } diff --git a/drivers/hwmon/corsair-psu.c b/drivers/hwmon/corsair-psu.c index 2c7c92272fe3..f8f22b8a67cd 100644 --- a/drivers/hwmon/corsair-psu.c +++ b/drivers/hwmon/corsair-psu.c @@ -875,15 +875,16 @@ static const struct hid_device_id corsairpsu_idtable[] = { { HID_USB_DEVICE(0x1b1c, 0x1c04) }, /* Corsair HX650i */ { HID_USB_DEVICE(0x1b1c, 0x1c05) }, /* Corsair HX750i */ { HID_USB_DEVICE(0x1b1c, 0x1c06) }, /* Corsair HX850i */ - { HID_USB_DEVICE(0x1b1c, 0x1c07) }, /* Corsair HX1000i Series 2022 */ - { HID_USB_DEVICE(0x1b1c, 0x1c08) }, /* Corsair HX1200i */ + { HID_USB_DEVICE(0x1b1c, 0x1c07) }, /* Corsair HX1000i Legacy */ + { HID_USB_DEVICE(0x1b1c, 0x1c08) }, /* Corsair HX1200i Legacy */ { HID_USB_DEVICE(0x1b1c, 0x1c09) }, /* Corsair RM550i */ { HID_USB_DEVICE(0x1b1c, 0x1c0a) }, /* Corsair RM650i */ { HID_USB_DEVICE(0x1b1c, 0x1c0b) }, /* Corsair RM750i */ { HID_USB_DEVICE(0x1b1c, 0x1c0c) }, /* Corsair RM850i */ { HID_USB_DEVICE(0x1b1c, 0x1c0d) }, /* Corsair RM1000i */ { HID_USB_DEVICE(0x1b1c, 0x1c1e) }, /* Corsair HX1000i Series 2023 */ - { HID_USB_DEVICE(0x1b1c, 0x1c1f) }, /* Corsair HX1500i Series 2022 and 2023 */ + { HID_USB_DEVICE(0x1b1c, 0x1c1f) }, /* Corsair HX1500i Legacy and Series 2023 */ + { HID_USB_DEVICE(0x1b1c, 0x1c23) }, /* Corsair HX1200i Series 2023 */ { }, }; MODULE_DEVICE_TABLE(hid, corsairpsu_idtable); diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c index 0a8b95ce35f7..365e37bba0f3 100644 --- a/drivers/i2c/busses/i2c-qcom-geni.c +++ b/drivers/i2c/busses/i2c-qcom-geni.c @@ -990,8 +990,11 @@ static int __maybe_unused geni_i2c_runtime_resume(struct device *dev) return ret;
ret = geni_se_resources_on(&gi2c->se); - if (ret) + if (ret) { + clk_disable_unprepare(gi2c->core_clk); + geni_icc_disable(&gi2c->se); return ret; + }
enable_irq(gi2c->irq); gi2c->suspended = 0; diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c index 97f338b123b1..25bc7b8d98f0 100644 --- a/drivers/i2c/i2c-smbus.c +++ b/drivers/i2c/i2c-smbus.c @@ -34,6 +34,7 @@ static int smbus_do_alert(struct device *dev, void *addrp) struct i2c_client *client = i2c_verify_client(dev); struct alert_data *data = addrp; struct i2c_driver *driver; + int ret;
if (!client || client->addr != data->addr) return 0; @@ -47,16 +48,47 @@ static int smbus_do_alert(struct device *dev, void *addrp) device_lock(dev); if (client->dev.driver) { driver = to_i2c_driver(client->dev.driver); - if (driver->alert) + if (driver->alert) { + /* Stop iterating after we find the device */ driver->alert(client, data->type, data->data); - else + ret = -EBUSY; + } else { dev_warn(&client->dev, "no driver alert()!\n"); - } else + ret = -EOPNOTSUPP; + } + } else { dev_dbg(&client->dev, "alert with no driver\n"); + ret = -ENODEV; + } + device_unlock(dev); + + return ret; +} + +/* Same as above, but call back all drivers with alert handler */ + +static int smbus_do_alert_force(struct device *dev, void *addrp) +{ + struct i2c_client *client = i2c_verify_client(dev); + struct alert_data *data = addrp; + struct i2c_driver *driver; + + if (!client || (client->flags & I2C_CLIENT_TEN)) + return 0; + + /* + * Drivers should either disable alerts, or provide at least + * a minimal handler. Lock so the driver won't change. + */ + device_lock(dev); + if (client->dev.driver) { + driver = to_i2c_driver(client->dev.driver); + if (driver->alert) + driver->alert(client, data->type, data->data); + } device_unlock(dev);
- /* Stop iterating after we find the device */ - return -EBUSY; + return 0; }
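The error codes above lean on device_for_each_child() semantics: the walk stops as soon as the callback returns non-zero, and that value is propagated to the caller. That is why a handled alert returns -EBUSY (stop early) while smbus_do_alert_force() always returns 0 (visit every child). A sketch of the contract with a hypothetical callback and parent device:

static int count_children(struct device *dev, void *data)
{
        (*(int *)data)++;
        return 0;       /* 0 == keep iterating; non-zero stops the walk */
}

/* visits every child because the callback never returns non-zero */
int n = 0;
device_for_each_child(parent, &n, count_children);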
/* @@ -67,6 +99,7 @@ static irqreturn_t smbus_alert(int irq, void *d) { struct i2c_smbus_alert *alert = d; struct i2c_client *ara; + unsigned short prev_addr = I2C_CLIENT_END; /* Not a valid address */
ara = alert->ara;
@@ -94,8 +127,25 @@ static irqreturn_t smbus_alert(int irq, void *d) data.addr, data.data);
/* Notify driver for the device which issued the alert */ - device_for_each_child(&ara->adapter->dev, &data, - smbus_do_alert); + status = device_for_each_child(&ara->adapter->dev, &data, + smbus_do_alert); + /* + * If we read the same address more than once, and the alert + * was not handled by a driver, it won't do any good to repeat + * the loop because it will never terminate. Try again, this + * time calling the alert handlers of all devices connected to + * the bus, and abort the loop afterwards. If this helps, we + * are all set. If it doesn't, there is nothing else we can do, + * so we might as well abort the loop. + * Note: This assumes that a driver with alert handler handles + * the alert properly and clears it if necessary. + */ + if (data.addr == prev_addr && status != -EBUSY) { + device_for_each_child(&ara->adapter->dev, &data, + smbus_do_alert_force); + break; + } + prev_addr = data.addr; }
return IRQ_HANDLED; diff --git a/drivers/irqchip/irq-loongarch-cpu.c b/drivers/irqchip/irq-loongarch-cpu.c index 9d8f2c406043..b35903a06902 100644 --- a/drivers/irqchip/irq-loongarch-cpu.c +++ b/drivers/irqchip/irq-loongarch-cpu.c @@ -18,11 +18,13 @@ struct fwnode_handle *cpuintc_handle;
static u32 lpic_gsi_to_irq(u32 gsi) { + int irq = 0; + /* Only pch irqdomain transferring is required for LoongArch. */ if (gsi >= GSI_MIN_PCH_IRQ && gsi <= GSI_MAX_PCH_IRQ) - return acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH); + irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH);
- return 0; + return (irq > 0) ? irq : 0; }
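The guard above matters because acpi_register_gsi() returns a negative errno on failure while this translation function returns an unsigned IRQ number, where 0 conventionally means "no mapping". The clamp in isolation (hypothetical helper name):

static u32 clamp_mapped_irq(int mapped)
{
        /* never leak a negative errno through an unsigned return type */
        return mapped > 0 ? (u32)mapped : 0;
}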
static struct fwnode_handle *lpic_get_gsi_domain_id(u32 gsi) diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c index 58881d313979..244a8d489cac 100644 --- a/drivers/irqchip/irq-mbigen.c +++ b/drivers/irqchip/irq-mbigen.c @@ -64,6 +64,20 @@ struct mbigen_device { void __iomem *base; };
+static inline unsigned int get_mbigen_node_offset(unsigned int nid) +{ + unsigned int offset = nid * MBIGEN_NODE_OFFSET; + + /* + * To avoid touching the clear register in unexpected ways, skip + * directly over the clear register block when accessing more than + * 10 mbigen nodes. + */ + if (nid >= (REG_MBIGEN_CLEAR_OFFSET / MBIGEN_NODE_OFFSET)) + offset += MBIGEN_NODE_OFFSET; + + return offset; +} + static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq) { unsigned int nid, pin; @@ -72,8 +86,7 @@ static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq) nid = hwirq / IRQS_PER_MBIGEN_NODE + 1; pin = hwirq % IRQS_PER_MBIGEN_NODE;
- return pin * 4 + nid * MBIGEN_NODE_OFFSET - + REG_MBIGEN_VEC_OFFSET; + return pin * 4 + get_mbigen_node_offset(nid) + REG_MBIGEN_VEC_OFFSET; }
static inline void get_mbigen_type_reg(irq_hw_number_t hwirq, @@ -88,8 +101,7 @@ static inline void get_mbigen_type_reg(irq_hw_number_t hwirq, *mask = 1 << (irq_ofst % 32); ofst = irq_ofst / 32 * 4;
- *addr = ofst + nid * MBIGEN_NODE_OFFSET - + REG_MBIGEN_TYPE_OFFSET; + *addr = ofst + get_mbigen_node_offset(nid) + REG_MBIGEN_TYPE_OFFSET; }
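The offset helper above accounts for a block of clear registers interleaved into the per-node register layout: nodes past the clear block must be shifted up by one node-sized stride. A standalone sketch with toy constants chosen so the ratio is 10, matching the comment (the real MBIGEN_NODE_OFFSET and REG_MBIGEN_CLEAR_OFFSET values are not shown in this hunk):

#define NODE_SZ         0x1000          /* toy per-node register stride */
#define CLEAR_OFF       0xa000          /* toy clear block at node 10 */

static unsigned int node_offset(unsigned int nid)
{
        unsigned int off = nid * NODE_SZ;

        if (nid >= CLEAR_OFF / NODE_SZ) /* nodes 10+ sit past the block */
                off += NODE_SZ;
        return off;
}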
static inline void get_mbigen_clear_reg(irq_hw_number_t hwirq, diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c index 9a1791908598..a4c3b57098ba 100644 --- a/drivers/irqchip/irq-meson-gpio.c +++ b/drivers/irqchip/irq-meson-gpio.c @@ -178,7 +178,7 @@ struct meson_gpio_irq_controller { void __iomem *base; u32 channel_irqs[MAX_NUM_CHANNEL]; DECLARE_BITMAP(channel_map, MAX_NUM_CHANNEL); - spinlock_t lock; + raw_spinlock_t lock; };
static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl, @@ -187,14 +187,14 @@ static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl, unsigned long flags; u32 tmp;
- spin_lock_irqsave(&ctl->lock, flags); + raw_spin_lock_irqsave(&ctl->lock, flags);
tmp = readl_relaxed(ctl->base + reg); tmp &= ~mask; tmp |= val; writel_relaxed(tmp, ctl->base + reg);
- spin_unlock_irqrestore(&ctl->lock, flags); + raw_spin_unlock_irqrestore(&ctl->lock, flags); }
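The spinlock_t to raw_spinlock_t conversion matters on PREEMPT_RT, where plain spinlock_t becomes a sleeping lock; these irqchip callbacks run in hard-IRQ context and must keep a true spinning lock. The pattern in isolation, with a hypothetical register and lock:

static DEFINE_RAW_SPINLOCK(chip_lock);  /* hypothetical */

static void chip_update_bits(void __iomem *reg, u32 mask, u32 val)
{
        unsigned long flags;
        u32 tmp;

        /* raw_ variants never sleep, even on PREEMPT_RT */
        raw_spin_lock_irqsave(&chip_lock, flags);
        tmp = readl_relaxed(reg);
        tmp = (tmp & ~mask) | val;
        writel_relaxed(tmp, reg);
        raw_spin_unlock_irqrestore(&chip_lock, flags);
}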
static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl) @@ -244,12 +244,12 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl, unsigned long flags; unsigned int idx;
- spin_lock_irqsave(&ctl->lock, flags); + raw_spin_lock_irqsave(&ctl->lock, flags);
/* Find a free channel */ idx = find_first_zero_bit(ctl->channel_map, ctl->params->nr_channels); if (idx >= ctl->params->nr_channels) { - spin_unlock_irqrestore(&ctl->lock, flags); + raw_spin_unlock_irqrestore(&ctl->lock, flags); pr_err("No channel available\n"); return -ENOSPC; } @@ -257,7 +257,7 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl, /* Mark the channel as used */ set_bit(idx, ctl->channel_map);
- spin_unlock_irqrestore(&ctl->lock, flags); + raw_spin_unlock_irqrestore(&ctl->lock, flags);
/* * Setup the mux of the channel to route the signal of the pad @@ -567,7 +567,7 @@ static int meson_gpio_irq_of_init(struct device_node *node, struct device_node * if (!ctl) return -ENOMEM;
- spin_lock_init(&ctl->lock); + raw_spin_lock_init(&ctl->lock);
ctl->base = of_iomap(node, 0); if (!ctl->base) { diff --git a/drivers/irqchip/irq-riscv-aplic-msi.c b/drivers/irqchip/irq-riscv-aplic-msi.c index 028444af48bd..d7773f76e5d0 100644 --- a/drivers/irqchip/irq-riscv-aplic-msi.c +++ b/drivers/irqchip/irq-riscv-aplic-msi.c @@ -32,15 +32,10 @@ static void aplic_msi_irq_unmask(struct irq_data *d) aplic_irq_unmask(d); }
-static void aplic_msi_irq_eoi(struct irq_data *d) +static void aplic_msi_irq_retrigger_level(struct irq_data *d) { struct aplic_priv *priv = irq_data_get_irq_chip_data(d);
- /* - * EOI handling is required only for level-triggered interrupts - * when APLIC is in MSI mode. - */ - switch (irqd_get_trigger_type(d)) { case IRQ_TYPE_LEVEL_LOW: case IRQ_TYPE_LEVEL_HIGH: @@ -59,6 +54,29 @@ static void aplic_msi_irq_eoi(struct irq_data *d) } }
+static void aplic_msi_irq_eoi(struct irq_data *d) +{ + /* + * EOI handling is required only for level-triggered interrupts + * when APLIC is in MSI mode. + */ + aplic_msi_irq_retrigger_level(d); +} + +static int aplic_msi_irq_set_type(struct irq_data *d, unsigned int type) +{ + int rc = aplic_irq_set_type(d, type); + + if (rc) + return rc; + /* + * Updating sourcecfg register for level-triggered interrupts + * requires interrupt retriggering when APLIC is in MSI mode. + */ + aplic_msi_irq_retrigger_level(d); + return 0; +} + static void aplic_msi_write_msg(struct irq_data *d, struct msi_msg *msg) { unsigned int group_index, hart_index, guest_index, val; @@ -130,7 +148,7 @@ static const struct msi_domain_template aplic_msi_template = { .name = "APLIC-MSI", .irq_mask = aplic_msi_irq_mask, .irq_unmask = aplic_msi_irq_unmask, - .irq_set_type = aplic_irq_set_type, + .irq_set_type = aplic_msi_irq_set_type, .irq_eoi = aplic_msi_irq_eoi, #ifdef CONFIG_SMP .irq_set_affinity = irq_chip_set_affinity_parent, diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c index 238d3d344949..7e08714d507f 100644 --- a/drivers/irqchip/irq-xilinx-intc.c +++ b/drivers/irqchip/irq-xilinx-intc.c @@ -189,7 +189,7 @@ static int __init xilinx_intc_of_init(struct device_node *intc, irqc->intr_mask = 0; }
- if (irqc->intr_mask >> irqc->nr_irq) + if ((u64)irqc->intr_mask >> irqc->nr_irq) pr_warn("irq-xilinx: mismatch in kind-of-intr param\n");
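The (u64) cast is the whole fix: in C, shifting a 32-bit value by 32 or more bits is undefined behaviour, and nr_irq can legitimately be 32 here. Widening first makes the "any interrupt bits beyond nr_irq?" test well defined. A sketch of the failure mode:

u32 intr_mask = 0xffffffff;
unsigned int nr_irq = 32;

/* (intr_mask >> nr_irq) is undefined behaviour for nr_irq == 32;
 * after widening to u64 the shift is well defined and yields 0. */
bool mismatch = ((u64)intr_mask >> nr_irq) != 0;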
pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n", diff --git a/drivers/md/md.c b/drivers/md/md.c index 9c5be016e507..a5b5801baa9e 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -479,7 +479,6 @@ int mddev_suspend(struct mddev *mddev, bool interruptible) */ WRITE_ONCE(mddev->suspended, mddev->suspended + 1);
- del_timer_sync(&mddev->safemode_timer); /* restrict memory reclaim I/O during raid array is suspend */ mddev->noio_flag = memalloc_noio_save();
@@ -8639,12 +8638,12 @@ EXPORT_SYMBOL(md_done_sync); * A return value of 'false' means that the write wasn't recorded * and cannot proceed as the array is being suspended. */ -bool md_write_start(struct mddev *mddev, struct bio *bi) +void md_write_start(struct mddev *mddev, struct bio *bi) { int did_change = 0;
if (bio_data_dir(bi) != WRITE) - return true; + return;
BUG_ON(mddev->ro == MD_RDONLY); if (mddev->ro == MD_AUTO_READ) { @@ -8677,15 +8676,9 @@ bool md_write_start(struct mddev *mddev, struct bio *bi) if (did_change) sysfs_notify_dirent_safe(mddev->sysfs_state); if (!mddev->has_superblocks) - return true; + return; wait_event(mddev->sb_wait, - !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) || - is_md_suspended(mddev)); - if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { - percpu_ref_put(&mddev->writes_pending); - return false; - } - return true; + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); } EXPORT_SYMBOL(md_write_start);
diff --git a/drivers/md/md.h b/drivers/md/md.h index ca085ecad504..487582058f74 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -785,7 +785,7 @@ extern void md_unregister_thread(struct mddev *mddev, struct md_thread __rcu **t extern void md_wakeup_thread(struct md_thread __rcu *thread); extern void md_check_recovery(struct mddev *mddev); extern void md_reap_sync_thread(struct mddev *mddev); -extern bool md_write_start(struct mddev *mddev, struct bio *bi); +extern void md_write_start(struct mddev *mddev, struct bio *bi); extern void md_write_inc(struct mddev *mddev, struct bio *bi); extern void md_write_end(struct mddev *mddev); extern void md_done_sync(struct mddev *mddev, int blocks, int ok); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 22bbd06ba6a2..5ea57b6748c5 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1688,8 +1688,7 @@ static bool raid1_make_request(struct mddev *mddev, struct bio *bio) if (bio_data_dir(bio) == READ) raid1_read_request(mddev, bio, sectors, NULL); else { - if (!md_write_start(mddev,bio)) - return false; + md_write_start(mddev,bio); raid1_write_request(mddev, bio, sectors); } return true; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index a4556d2e46bf..f8d7c02c6ed5 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1836,8 +1836,7 @@ static bool raid10_make_request(struct mddev *mddev, struct bio *bio) && md_flush_request(mddev, bio)) return true;
- if (!md_write_start(mddev, bio)) - return false; + md_write_start(mddev, bio);
if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) if (!raid10_handle_discard(mddev, bio)) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 1c6b58adec13..ff9f4751c096 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -6097,8 +6097,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi) ctx.do_flush = bi->bi_opf & REQ_PREFLUSH; }
- if (!md_write_start(mddev, bi)) - return false; + md_write_start(mddev, bi); /* * If array is degraded, better not do chunk aligned read because * later we might have to read it again in order to reconstruct @@ -6273,7 +6272,9 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk safepos = conf->reshape_safe; sector_div(safepos, data_disks); if (mddev->reshape_backwards) { - BUG_ON(writepos < reshape_sectors); + if (WARN_ON(writepos < reshape_sectors)) + return MaxSector; + writepos -= reshape_sectors; readpos += reshape_sectors; safepos += reshape_sectors; @@ -6291,14 +6292,18 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk * to set 'stripe_addr' which is where we will write to. */ if (mddev->reshape_backwards) { - BUG_ON(conf->reshape_progress == 0); + if (WARN_ON(conf->reshape_progress == 0)) + return MaxSector; + stripe_addr = writepos; - BUG_ON((mddev->dev_sectors & - ~((sector_t)reshape_sectors - 1)) - - reshape_sectors - stripe_addr - != sector_nr); + if (WARN_ON((mddev->dev_sectors & + ~((sector_t)reshape_sectors - 1)) - + reshape_sectors - stripe_addr != sector_nr)) + return MaxSector; } else { - BUG_ON(writepos != sector_nr + reshape_sectors); + if (WARN_ON(writepos != sector_nr + reshape_sectors)) + return MaxSector; + stripe_addr = sector_nr; }
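The BUG_ON() to WARN_ON() conversions above rely on WARN_ON() evaluating to the truth of its condition, so it can gate a graceful bail-out (here: abandoning the reshape step) where BUG_ON() would have taken the whole machine down. The recurring shape, in isolation:

/* WARN_ON(cond) logs a backtrace and returns cond, so the caller can
 * fail the operation instead of crashing the kernel. */
if (WARN_ON(bad_condition))             /* hypothetical condition */
        return MaxSector;               /* error value used by these paths */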
diff --git a/drivers/media/i2c/ov5647.c b/drivers/media/i2c/ov5647.c index 7e1ecdf2485f..0fb4d7bff9d1 100644 --- a/drivers/media/i2c/ov5647.c +++ b/drivers/media/i2c/ov5647.c @@ -1360,24 +1360,21 @@ static int ov5647_parse_dt(struct ov5647 *sensor, struct device_node *np) struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = V4L2_MBUS_CSI2_DPHY, }; - struct device_node *ep; + struct device_node *ep __free(device_node) = + of_graph_get_endpoint_by_regs(np, 0, -1); int ret;
- ep = of_graph_get_endpoint_by_regs(np, 0, -1); if (!ep) return -EINVAL;
ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep), &bus_cfg); if (ret) - goto out; + return ret;
sensor->clock_ncont = bus_cfg.bus.mipi_csi2.flags & V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK;
-out: - of_node_put(ep); - - return ret; + return 0; }
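The __free(device_node) annotation above comes from <linux/cleanup.h>: the destructor bound to the variable (of_node_put() for device nodes) runs automatically when the variable goes out of scope, which is what lets the error paths shed their goto/put ladder. A self-contained sketch using the well-known kfree destructor and a hypothetical function:

#include <linux/cleanup.h>
#include <linux/slab.h>

static int parse_blob(size_t len)
{
        u8 *buf __free(kfree) = kzalloc(len, GFP_KERNEL);

        if (!buf)
                return -ENOMEM;
        if (len < 16)
                return -EINVAL;         /* buf is freed automatically here */

        return 0;                       /* ...and here */
}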
static int ov5647_probe(struct i2c_client *client) diff --git a/drivers/media/pci/intel/ipu6/Kconfig b/drivers/media/pci/intel/ipu6/Kconfig index 154343080c82..40e20f0aa5ae 100644 --- a/drivers/media/pci/intel/ipu6/Kconfig +++ b/drivers/media/pci/intel/ipu6/Kconfig @@ -3,13 +3,14 @@ config VIDEO_INTEL_IPU6 depends on ACPI || COMPILE_TEST depends on VIDEO_DEV depends on X86 && X86_64 && HAS_DMA + depends on IPU_BRIDGE || !IPU_BRIDGE + select AUXILIARY_BUS select DMA_OPS select IOMMU_IOVA select VIDEO_V4L2_SUBDEV_API select MEDIA_CONTROLLER select VIDEOBUF2_DMA_CONTIG select V4L2_FWNODE - select IPU_BRIDGE help This is the 6th Gen Intel Image Processing Unit, found in Intel SoCs and used for capturing images and video from camera sensors. diff --git a/drivers/media/platform/amphion/vdec.c b/drivers/media/platform/amphion/vdec.c index a57f9f4f3b87..6a38a0fa0e2d 100644 --- a/drivers/media/platform/amphion/vdec.c +++ b/drivers/media/platform/amphion/vdec.c @@ -195,7 +195,6 @@ static int vdec_op_s_ctrl(struct v4l2_ctrl *ctrl) struct vdec_t *vdec = inst->priv; int ret = 0;
- vpu_inst_lock(inst); switch (ctrl->id) { case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE: vdec->params.display_delay_enable = ctrl->val; @@ -207,7 +206,6 @@ static int vdec_op_s_ctrl(struct v4l2_ctrl *ctrl) ret = -EINVAL; break; } - vpu_inst_unlock(inst);
return ret; } diff --git a/drivers/media/platform/amphion/venc.c b/drivers/media/platform/amphion/venc.c index 4eb57d793a9c..16ed4d21519c 100644 --- a/drivers/media/platform/amphion/venc.c +++ b/drivers/media/platform/amphion/venc.c @@ -518,7 +518,6 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl) struct venc_t *venc = inst->priv; int ret = 0;
- vpu_inst_lock(inst); switch (ctrl->id) { case V4L2_CID_MPEG_VIDEO_H264_PROFILE: venc->params.profile = ctrl->val; @@ -579,7 +578,6 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl) ret = -EINVAL; break; } - vpu_inst_unlock(inst);
return ret; } diff --git a/drivers/media/tuners/xc2028.c b/drivers/media/tuners/xc2028.c index 5a967edceca9..352b8a3679b7 100644 --- a/drivers/media/tuners/xc2028.c +++ b/drivers/media/tuners/xc2028.c @@ -1361,9 +1361,16 @@ static void load_firmware_cb(const struct firmware *fw, void *context) { struct dvb_frontend *fe = context; - struct xc2028_data *priv = fe->tuner_priv; + struct xc2028_data *priv; int rc;
+ if (!fe) { + pr_warn("xc2028: No frontend in %s\n", __func__); + return; + } + + priv = fe->tuner_priv; + tuner_dbg("request_firmware_nowait(): %s\n", fw ? "OK" : "error"); if (!fw) { tuner_err("Could not load firmware %s.\n", priv->fname); diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c index 51f4f653b983..5bebe1460a9f 100644 --- a/drivers/media/usb/uvc/uvc_video.c +++ b/drivers/media/usb/uvc/uvc_video.c @@ -214,13 +214,13 @@ static void uvc_fixup_video_ctrl(struct uvc_streaming *stream, * Compute a bandwidth estimation by multiplying the frame * size by the number of video frames per second, divide the * result by the number of USB frames (or micro-frames for - * high-speed devices) per second and add the UVC header size - * (assumed to be 12 bytes long). + * high- and super-speed devices) per second and add the UVC + * header size (assumed to be 12 bytes long). */ bandwidth = frame->wWidth * frame->wHeight / 8 * format->bpp; bandwidth *= 10000000 / interval + 1; bandwidth /= 1000; - if (stream->dev->udev->speed == USB_SPEED_HIGH) + if (stream->dev->udev->speed >= USB_SPEED_HIGH) bandwidth /= 8; bandwidth += 12;
@@ -478,6 +478,7 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf, ktime_t time; u16 host_sof; u16 dev_sof; + u32 dev_stc;
switch (data[1] & (UVC_STREAM_PTS | UVC_STREAM_SCR)) { case UVC_STREAM_PTS | UVC_STREAM_SCR: @@ -526,6 +527,34 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf, if (dev_sof == stream->clock.last_sof) return;
+ dev_stc = get_unaligned_le32(&data[header_size - 6]); + + /* + * STC (Source Time Clock) is the clock used by the camera. The UVC 1.5 + * standard states that it "must be captured when the first video data + * of a video frame is put on the USB bus". This is generally understood + * as requiring devices to clear the payload header's SCR bit before + * the first packet containing video data. + * + * Most vendors follow that interpretation, but some (namely SunplusIT + * on some devices) always set the `UVC_STREAM_SCR` bit, fill the SCR + * field with zeros, and expect that the driver only processes the SCR if + * there is data in the packet. + * + * Ignore all the hardware timestamp information if we haven't received + * any data for this frame yet, the packet contains no data, and both + * STC and SOF are zero. This heuristic should be safe with compliant + * devices: in the very unlikely case where a UVC 1.1 device sends + * timing information only before the first packet containing data, + * and both STC and SOF happen to be zero for a particular frame, we + * would only miss one clock sample out of many and the clock recovery + * algorithm wouldn't suffer from this condition. + */ + if (buf && buf->bytesused == 0 && len == header_size && + dev_stc == 0 && dev_sof == 0) + return; + stream->clock.last_sof = dev_sof;
host_sof = usb_get_current_frame_number(stream->dev->udev); @@ -575,7 +604,7 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf, spin_lock_irqsave(&stream->clock.lock, flags);
sample = &stream->clock.samples[stream->clock.head]; - sample->dev_stc = get_unaligned_le32(&data[header_size - 6]); + sample->dev_stc = dev_stc; sample->dev_sof = dev_sof; sample->host_sof = host_sof; sample->host_time = time; diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c index bfe4caa0c99d..4cb79a4f2461 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c @@ -485,6 +485,8 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv) clear_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags); }
+ tx_ring->obj_num_shift_to_u8 = BITS_PER_TYPE(tx_ring->obj_num) - + ilog2(tx_ring->obj_num); tx_ring->obj_size = tx_obj_size;
rem = priv->rx_obj_num; diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c index e5bd57b65aaf..5b0c7890d4b4 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c @@ -2,7 +2,7 @@ // // mcp251xfd - Microchip MCP251xFD Family CAN controller driver // -// Copyright (c) 2019, 2020, 2021 Pengutronix, +// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix, // Marc Kleine-Budde kernel@pengutronix.de // // Based on: @@ -16,6 +16,11 @@
#include "mcp251xfd.h"
+static inline bool mcp251xfd_tx_fifo_sta_full(u32 fifo_sta) +{ + return !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF); +} + static inline int mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv, u8 *tef_tail) @@ -55,56 +60,39 @@ static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv) return 0; }
-static int -mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq) -{ - const struct mcp251xfd_tx_ring *tx_ring = priv->tx; - u32 tef_sta; - int err; - - err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta); - if (err) - return err; - - if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) { - netdev_err(priv->ndev, - "Transmit Event FIFO buffer overflow.\n"); - return -ENOBUFS; - } - - netdev_info(priv->ndev, - "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x).\n", - tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ? - "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ? - "not empty" : "empty", - seq, priv->tef->tail, priv->tef->head, tx_ring->head); - - /* The Sequence Number in the TEF doesn't match our tef_tail. */ - return -EAGAIN; -} - static int mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv, const struct mcp251xfd_hw_tef_obj *hw_tef_obj, unsigned int *frame_len_ptr) { struct net_device_stats *stats = &priv->ndev->stats; + u32 seq, tef_tail_masked, tef_tail; struct sk_buff *skb; - u32 seq, seq_masked, tef_tail_masked, tef_tail;
- seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, + /* Use the MCP2517FD mask on the MCP2518FD, too. We only + * compare 7 bits, this is enough to detect old TEF objects. + */ + seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK, hw_tef_obj->flags); - - /* Use the MCP2517FD mask on the MCP2518FD, too. We only - * compare 7 bits, this should be enough to detect - * net-yet-completed, i.e. old TEF objects. - */ - seq_masked = seq & - field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK); tef_tail_masked = priv->tef->tail & field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK); - if (seq_masked != tef_tail_masked) - return mcp251xfd_handle_tefif_recover(priv, seq); + + /* According to mcp2518fd erratum DS80000789E 6., the FIFOCI + * bits of a FIFOSTA register (here: the TX FIFO tail index) + * might be corrupted, and we might process past the TEF FIFO's + * head into old CAN frames. + * + * Compare the sequence number of the currently processed CAN + * frame with the expected sequence number. Abort with + * -EBADMSG if an old CAN frame is detected. + */ + if (seq != tef_tail_masked) { + netdev_dbg(priv->ndev, "%s: chip=0x%02x ring=0x%02x\n", __func__, + seq, tef_tail_masked); + stats->tx_fifo_errors++; + + return -EBADMSG; + }
tef_tail = mcp251xfd_get_tef_tail(priv); skb = priv->can.echo_skb[tef_tail]; @@ -120,28 +108,44 @@ mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv, return 0; }
-static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv) +static int +mcp251xfd_get_tef_len(struct mcp251xfd_priv *priv, u8 *len_p) { const struct mcp251xfd_tx_ring *tx_ring = priv->tx; - unsigned int new_head; - u8 chip_tx_tail; + const u8 shift = tx_ring->obj_num_shift_to_u8; + u8 chip_tx_tail, tail, len; + u32 fifo_sta; int err;
- err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail); + err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(priv->tx->fifo_nr), + &fifo_sta); if (err) return err;
- /* chip_tx_tail, is the next TX-Object send by the HW. - * The new TEF head must be >= the old head, ... + if (mcp251xfd_tx_fifo_sta_full(fifo_sta)) { + *len_p = tx_ring->obj_num; + return 0; + } + + chip_tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta); + + err = mcp251xfd_check_tef_tail(priv); + if (err) + return err; + tail = mcp251xfd_get_tef_tail(priv); + + /* First shift to a full u8. The subtraction works on signed + * values, which keeps the difference stable across the u8 + * overflow. The right shift acts on len, which is a u8. */ - new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail; - if (new_head <= priv->tef->head) - new_head += tx_ring->obj_num; + BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(chip_tx_tail)); + BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(tail)); + BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(len));
- /* ... but it cannot exceed the TX head. */ - priv->tef->head = min(new_head, tx_ring->head); + len = (chip_tx_tail << shift) - (tail << shift); + *len_p = len >> shift;
- return mcp251xfd_check_tef_tail(priv); + return 0; }
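The shift trick above computes a ring distance that stays correct across index wrap-around: scaling both indices by obj_num_shift_to_u8 stretches the ring over the full u8 range, so the u8 subtraction wraps exactly at the ring size. A userspace sketch with toy numbers (ring of 8 objects, so the shift is 8 - ilog2(8) = 5):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const unsigned int shift = 8 - 3;       /* BITS_PER_TYPE(u8) - ilog2(8) */
        uint8_t head = 2, tail = 7;             /* head has wrapped past tail */

        uint8_t len = (uint8_t)((head << shift) - (tail << shift));

        printf("%d\n", len >> shift);           /* prints 3 == (2 - 7) mod 8 */
        return 0;
}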
static inline int @@ -182,13 +186,12 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv) u8 tef_tail, len, l; int err, i;
- err = mcp251xfd_tef_ring_update(priv); + err = mcp251xfd_get_tef_len(priv, &len); if (err) return err;
tef_tail = mcp251xfd_get_tef_tail(priv); - len = mcp251xfd_get_tef_len(priv); - l = mcp251xfd_get_tef_linear_len(priv); + l = mcp251xfd_get_tef_linear_len(priv, len); err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l); if (err) return err; @@ -203,12 +206,12 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv) unsigned int frame_len = 0;
err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len); - /* -EAGAIN means the Sequence Number in the TEF - * doesn't match our tef_tail. This can happen if we - * read the TEF objects too early. Leave loop let the - * interrupt handler call us again. + /* -EBADMSG means we're affected by mcp2518fd erratum + * DS80000789E 6., i.e. the Sequence Number in the TEF + * doesn't match our tef_tail. Don't process any + * further and mark processed frames as good. */ - if (err == -EAGAIN) + if (err == -EBADMSG) goto out_netif_wake_queue; if (err) return err; @@ -223,6 +226,8 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv) struct mcp251xfd_tx_ring *tx_ring = priv->tx; int offset;
+ ring->head += len; + /* Increment the TEF FIFO tail pointer 'len' times in * a single SPI message. * diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h index b35bfebd23f2..4628bf847bc9 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h @@ -524,6 +524,7 @@ struct mcp251xfd_tef_ring {
/* u8 obj_num equals tx_ring->obj_num */ /* u8 obj_size equals sizeof(struct mcp251xfd_hw_tef_obj) */ + /* u8 obj_num_shift_to_u8 equals tx_ring->obj_num_shift_to_u8 */
union mcp251xfd_write_reg_buf irq_enable_buf; struct spi_transfer irq_enable_xfer; @@ -542,6 +543,7 @@ struct mcp251xfd_tx_ring { u8 nr; u8 fifo_nr; u8 obj_num; + u8 obj_num_shift_to_u8; u8 obj_size;
struct mcp251xfd_tx_obj obj[MCP251XFD_TX_OBJ_NUM_MAX]; @@ -861,17 +863,8 @@ static inline u8 mcp251xfd_get_tef_tail(const struct mcp251xfd_priv *priv) return priv->tef->tail & (priv->tx->obj_num - 1); }
-static inline u8 mcp251xfd_get_tef_len(const struct mcp251xfd_priv *priv) +static inline u8 mcp251xfd_get_tef_linear_len(const struct mcp251xfd_priv *priv, u8 len) { - return priv->tef->head - priv->tef->tail; -} - -static inline u8 mcp251xfd_get_tef_linear_len(const struct mcp251xfd_priv *priv) -{ - u8 len; - - len = mcp251xfd_get_tef_len(priv); - return min_t(u8, len, priv->tx->obj_num - mcp251xfd_get_tef_tail(priv)); }
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index ed1e6560df25..0e663ec0c12a 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -675,8 +675,10 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) of_remove_property(child, prop);
phydev = of_phy_find_device(child); - if (phydev) + if (phydev) { phy_device_remove(phydev); + phy_device_free(phydev); + } }
err = mdiobus_register(priv->user_mii_bus); diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index baa1eeb9a1b0..3103e1b32d0b 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -2578,7 +2578,11 @@ static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port) if (!port) return MICREL_KSZ8_P1_ERRATA; break; + case KSZ8567_CHIP_ID: case KSZ9477_CHIP_ID: + case KSZ9567_CHIP_ID: + case KSZ9896_CHIP_ID: + case KSZ9897_CHIP_ID: /* KSZ9477 Errata DS80000754C * * Module 4: Energy Efficient Ethernet (EEE) feature select must @@ -2588,6 +2592,13 @@ static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port) * controls. If not disabled, the PHY ports can auto-negotiate * to enable EEE, and this feature can cause link drops when * linked to another device supporting EEE. + * + * The same item appears in the errata for the KSZ9567, KSZ9896, + * and KSZ9897. + * + * A similar item appears in the errata for the KSZ8567, but + * provides an alternative workaround. For now, use the simple + * workaround of disabling the EEE feature for this device too. */ return MICREL_NO_EEE; } @@ -3763,6 +3774,11 @@ static int ksz_port_set_mac_address(struct dsa_switch *ds, int port, return -EBUSY; }
+ /* Need to initialize the variable, as the code that fills in the + * settings may not be executed. + */ + wol.wolopts = 0; + ksz_get_wol(ds, dp->index, &wol); if (wol.wolopts & WAKE_MAGIC) { dev_err(ds->dev,
- if (hw_resc->resv_tx_rings != bp->tx_nr_rings && - bp->hwrm_spec_code >= 0x10601) - return true; - /* Old firmware does not need RX ring reservations but we still * need to setup a default RSS map when needed. With new firmware * we go through RX ring reservations first and then set up the * RSS map for the successfully reserved RX rings when needed. */ - if (!BNXT_NEW_RM(bp)) { + if (!BNXT_NEW_RM(bp)) bnxt_check_rss_tbl_no_rmgr(bp); + + if (hw_resc->resv_tx_rings != bp->tx_nr_rings && + bp->hwrm_spec_code >= 0x10601) + return true; + + if (!BNXT_NEW_RM(bp)) return false; - }
vnic = bnxt_get_total_vnics(bp, rx);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index 1248792d7fd4..0715ea5bf13e 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -42,19 +42,15 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) struct bcmgenet_priv *priv = netdev_priv(dev); struct device *kdev = &priv->pdev->dev;
- if (dev->phydev) { + if (dev->phydev) phy_ethtool_get_wol(dev->phydev, wol); - if (wol->supported) - return; - }
- if (!device_can_wakeup(kdev)) { - wol->supported = 0; - wol->wolopts = 0; + /* MAC is not wake-up capable, return what the PHY does */ + if (!device_can_wakeup(kdev)) return; - }
- wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; + /* Overlay MAC capabilities with that of the PHY queried before */ + wol->supported |= WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; wol->wolopts = priv->wolopts; memset(wol->sopass, 0, sizeof(wol->sopass));
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index e32f6724f568..2e4f3e1782a2 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -775,6 +775,9 @@ void fec_ptp_stop(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev);
+ if (fep->pps_enable) + fec_ptp_enable_pps(fep, 0); + cancel_delayed_work_sync(&fep->time_keep); hrtimer_cancel(&fep->perout_timer); if (fep->ptp_clock) diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c index fe1741d482b4..cf816ede05f6 100644 --- a/drivers/net/ethernet/google/gve/gve_ethtool.c +++ b/drivers/net/ethernet/google/gve/gve_ethtool.c @@ -492,7 +492,7 @@ static int gve_set_channels(struct net_device *netdev, return -EINVAL; }
- if (!netif_carrier_ok(netdev)) { + if (!netif_running(netdev)) { priv->tx_cfg.num_queues = new_tx; priv->rx_cfg.num_queues = new_rx; return 0; diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index cabf7d4bcecb..8b14efd14a50 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -1511,7 +1511,7 @@ static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog, u32 status;
old_prog = READ_ONCE(priv->xdp_prog); - if (!netif_carrier_ok(priv->dev)) { + if (!netif_running(priv->dev)) { WRITE_ONCE(priv->xdp_prog, prog); if (old_prog) bpf_prog_put(old_prog); @@ -1784,7 +1784,7 @@ int gve_adjust_queues(struct gve_priv *priv, rx_alloc_cfg.qcfg = &new_rx_config; tx_alloc_cfg.num_rings = new_tx_config.num_queues;
- if (netif_carrier_ok(priv->dev)) { + if (netif_running(priv->dev)) { err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg); return err; } @@ -2001,7 +2001,7 @@ static int gve_set_features(struct net_device *netdev,
if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) { netdev->features ^= NETIF_F_LRO; - if (netif_carrier_ok(netdev)) { + if (netif_running(netdev)) { err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg); if (err) { /* Revert the change on error. */ @@ -2290,7 +2290,7 @@ static int gve_reset_recovery(struct gve_priv *priv, bool was_up)
int gve_reset(struct gve_priv *priv, bool attempt_teardown) { - bool was_up = netif_carrier_ok(priv->dev); + bool was_up = netif_running(priv->dev); int err;
dev_info(&priv->pdev->dev, "Performing reset\n"); @@ -2631,7 +2631,7 @@ static void gve_shutdown(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct gve_priv *priv = netdev_priv(netdev); - bool was_up = netif_carrier_ok(priv->dev); + bool was_up = netif_running(priv->dev);
rtnl_lock(); if (was_up && gve_close(priv->dev)) { @@ -2649,7 +2649,7 @@ static int gve_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *netdev = pci_get_drvdata(pdev); struct gve_priv *priv = netdev_priv(netdev); - bool was_up = netif_carrier_ok(priv->dev); + bool was_up = netif_running(priv->dev);
priv->suspend_cnt++; rtnl_lock(); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 9b075dd48889..f16d13e9ff6e 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -560,6 +560,8 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) if (test_bit(ICE_PREPARED_FOR_RESET, pf->state)) return;
+ synchronize_irq(pf->oicr_irq.virq); + ice_unplug_aux_dev(pf);
/* Notify VFs of impending reset */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index f1ee5584e8fa..3ac9d7ab83f2 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -905,8 +905,8 @@ static void idpf_vport_stop(struct idpf_vport *vport)
vport->link_up = false; idpf_vport_intr_deinit(vport); - idpf_vport_intr_rel(vport); idpf_vport_queues_rel(vport); + idpf_vport_intr_rel(vport); np->state = __IDPF_VPORT_DOWN; }
@@ -1337,9 +1337,8 @@ static void idpf_rx_init_buf_tail(struct idpf_vport *vport) /** * idpf_vport_open - Bring up a vport * @vport: vport to bring up - * @alloc_res: allocate queue resources */ -static int idpf_vport_open(struct idpf_vport *vport, bool alloc_res) +static int idpf_vport_open(struct idpf_vport *vport) { struct idpf_netdev_priv *np = netdev_priv(vport->netdev); struct idpf_adapter *adapter = vport->adapter; @@ -1352,45 +1351,43 @@ static int idpf_vport_open(struct idpf_vport *vport, bool alloc_res) /* we do not allow interface up just yet */ netif_carrier_off(vport->netdev);
- if (alloc_res) { - err = idpf_vport_queues_alloc(vport); - if (err) - return err; - } - err = idpf_vport_intr_alloc(vport); if (err) { dev_err(&adapter->pdev->dev, "Failed to allocate interrupts for vport %u: %d\n", vport->vport_id, err); - goto queues_rel; + return err; }
+ err = idpf_vport_queues_alloc(vport); + if (err) + goto intr_rel; + err = idpf_vport_queue_ids_init(vport); if (err) { dev_err(&adapter->pdev->dev, "Failed to initialize queue ids for vport %u: %d\n", vport->vport_id, err); - goto intr_rel; + goto queues_rel; }
err = idpf_vport_intr_init(vport); if (err) { dev_err(&adapter->pdev->dev, "Failed to initialize interrupts for vport %u: %d\n", vport->vport_id, err); - goto intr_rel; + goto queues_rel; }
err = idpf_rx_bufs_init_all(vport); if (err) { dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n", vport->vport_id, err); - goto intr_rel; + goto queues_rel; }
err = idpf_queue_reg_init(vport); if (err) { dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n", vport->vport_id, err); - goto intr_rel; + goto queues_rel; }
idpf_rx_init_buf_tail(vport); @@ -1457,10 +1454,10 @@ static int idpf_vport_open(struct idpf_vport *vport, bool alloc_res) idpf_send_map_unmap_queue_vector_msg(vport, false); intr_deinit: idpf_vport_intr_deinit(vport); -intr_rel: - idpf_vport_intr_rel(vport); queues_rel: idpf_vport_queues_rel(vport); +intr_rel: + idpf_vport_intr_rel(vport);
return err; } @@ -1541,7 +1538,7 @@ void idpf_init_task(struct work_struct *work) np = netdev_priv(vport->netdev); np->state = __IDPF_VPORT_DOWN; if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags)) - idpf_vport_open(vport, true); + idpf_vport_open(vport);
/* Spawn and return 'idpf_init_task' work queue until all the * default vports are created @@ -1900,9 +1897,6 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport, goto free_vport; }
- err = idpf_vport_queues_alloc(new_vport); - if (err) - goto free_vport; if (current_state <= __IDPF_VPORT_DOWN) { idpf_send_delete_queues_msg(vport); } else { @@ -1974,17 +1968,23 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
err = idpf_set_real_num_queues(vport); if (err) - goto err_reset; + goto err_open;
if (current_state == __IDPF_VPORT_UP) - err = idpf_vport_open(vport, false); + err = idpf_vport_open(vport);
kfree(new_vport);
return err;
err_reset: - idpf_vport_queues_rel(new_vport); + idpf_send_add_queues_msg(vport, vport->num_txq, vport->num_complq, + vport->num_rxq, vport->num_bufq); + +err_open: + if (current_state == __IDPF_VPORT_UP) + idpf_vport_open(vport); + free_vport: kfree(new_vport);
@@ -2213,7 +2213,7 @@ static int idpf_open(struct net_device *netdev) idpf_vport_ctrl_lock(netdev); vport = idpf_netdev_to_vport(netdev);
- err = idpf_vport_open(vport, true); + err = idpf_vport_open(vport);
idpf_vport_ctrl_unlock(netdev);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index b023704bbbda..20ca04320d4b 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -3436,9 +3436,7 @@ static void idpf_vport_intr_napi_dis_all(struct idpf_vport *vport) */ void idpf_vport_intr_rel(struct idpf_vport *vport) { - int i, j, v_idx; - - for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { + for (u32 v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
kfree(q_vector->bufq); @@ -3449,26 +3447,6 @@ void idpf_vport_intr_rel(struct idpf_vport *vport) q_vector->rx = NULL; }
- /* Clean up the mapping of queues to vectors */ - for (i = 0; i < vport->num_rxq_grp; i++) { - struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; - - if (idpf_is_queue_model_split(vport->rxq_model)) - for (j = 0; j < rx_qgrp->splitq.num_rxq_sets; j++) - rx_qgrp->splitq.rxq_sets[j]->rxq.q_vector = NULL; - else - for (j = 0; j < rx_qgrp->singleq.num_rxq; j++) - rx_qgrp->singleq.rxqs[j]->q_vector = NULL; - } - - if (idpf_is_queue_model_split(vport->txq_model)) - for (i = 0; i < vport->num_txq_grp; i++) - vport->txq_grps[i].complq->q_vector = NULL; - else - for (i = 0; i < vport->num_txq_grp; i++) - for (j = 0; j < vport->txq_grps[i].num_txq; j++) - vport->txq_grps[i].txqs[j]->q_vector = NULL; - kfree(vport->q_vectors); vport->q_vectors = NULL; } @@ -3636,13 +3614,15 @@ void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector) /** * idpf_vport_intr_req_irq - get MSI-X vectors from the OS for the vport * @vport: main vport structure - * @basename: name for the vector */ -static int idpf_vport_intr_req_irq(struct idpf_vport *vport, char *basename) +static int idpf_vport_intr_req_irq(struct idpf_vport *vport) { struct idpf_adapter *adapter = vport->adapter; + const char *drv_name, *if_name, *vec_name; int vector, err, irq_num, vidx; - const char *vec_name; + + drv_name = dev_driver_string(&adapter->pdev->dev); + if_name = netdev_name(vport->netdev);
for (vector = 0; vector < vport->num_q_vectors; vector++) { struct idpf_q_vector *q_vector = &vport->q_vectors[vector]; @@ -3659,8 +3639,8 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport, char *basename) else continue;
- q_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d", - basename, vec_name, vidx); + q_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%s-%d", drv_name, + if_name, vec_name, vidx);
err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0, q_vector->name, q_vector); @@ -4170,7 +4150,6 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport) */ int idpf_vport_intr_init(struct idpf_vport *vport) { - char *int_name; int err;
err = idpf_vport_intr_init_vec_idx(vport); @@ -4184,11 +4163,7 @@ int idpf_vport_intr_init(struct idpf_vport *vport) if (err) goto unroll_vectors_alloc;
- int_name = kasprintf(GFP_KERNEL, "%s-%s", - dev_driver_string(&vport->adapter->pdev->dev), - vport->netdev->name); - - err = idpf_vport_intr_req_irq(vport, int_name); + err = idpf_vport_intr_req_irq(vport); if (err) goto unroll_vectors_alloc;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index b5333da20e8a..cdc84a27a04e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -2374,6 +2374,9 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq if (likely(wi->consumed_strides < rq->mpwqe.num_strides)) return;
+ if (unlikely(!cstrides)) + return; + wq = &rq->mpwqe.wq; wqe = mlx5_wq_ll_get_wqe(wq, wqe_id); mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index c0ced4d315f3..d92f640bae57 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -1599,6 +1599,7 @@ static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci, { struct pci_dev *pdev = mlxsw_pci->pdev; char mrsr_pl[MLXSW_REG_MRSR_LEN]; + struct pci_dev *bridge; int err;
if (!pci_reset_sbr_supported) { @@ -1615,6 +1616,9 @@ static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci, sbr: device_lock_assert(&pdev->dev);
+ bridge = pci_upstream_bridge(pdev); + if (bridge) + pci_cfg_access_lock(bridge); pci_cfg_access_lock(pdev); pci_save_state(pdev);
@@ -1624,6 +1628,8 @@ static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci,
pci_restore_state(pdev); pci_cfg_access_unlock(pdev); + if (bridge) + pci_cfg_access_unlock(bridge);
return err; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index 466c4002f00d..3a7f3a8b0671 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -21,6 +21,7 @@ #define RGMII_IO_MACRO_CONFIG2 0x1C #define RGMII_IO_MACRO_DEBUG1 0x20 #define EMAC_SYSTEM_LOW_POWER_DEBUG 0x28 +#define EMAC_WRAPPER_SGMII_PHY_CNTRL1 0xf4
/* RGMII_IO_MACRO_CONFIG fields */ #define RGMII_CONFIG_FUNC_CLK_EN BIT(30) @@ -79,6 +80,9 @@ #define ETHQOS_MAC_CTRL_SPEED_MODE BIT(14) #define ETHQOS_MAC_CTRL_PORT_SEL BIT(15)
+/* EMAC_WRAPPER_SGMII_PHY_CNTRL1 bits */ +#define SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN BIT(3) + #define SGMII_10M_RX_CLK_DVDR 0x31
struct ethqos_emac_por { @@ -95,6 +99,7 @@ struct ethqos_emac_driver_data { bool has_integrated_pcs; u32 dma_addr_width; struct dwmac4_addrs dwmac4_addrs; + bool needs_sgmii_loopback; };
struct qcom_ethqos { @@ -114,6 +119,7 @@ struct qcom_ethqos { unsigned int num_por; bool rgmii_config_loopback_en; bool has_emac_ge_3; + bool needs_sgmii_loopback; };
static int rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset) @@ -191,8 +197,22 @@ ethqos_update_link_clk(struct qcom_ethqos *ethqos, unsigned int speed) clk_set_rate(ethqos->link_clk, ethqos->link_clk_rate); }
+static void +qcom_ethqos_set_sgmii_loopback(struct qcom_ethqos *ethqos, bool enable) +{ + if (!ethqos->needs_sgmii_loopback || + ethqos->phy_mode != PHY_INTERFACE_MODE_2500BASEX) + return; + + rgmii_updatel(ethqos, + SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN, + enable ? SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN : 0, + EMAC_WRAPPER_SGMII_PHY_CNTRL1); +} + static void ethqos_set_func_clk_en(struct qcom_ethqos *ethqos) { + qcom_ethqos_set_sgmii_loopback(ethqos, true); rgmii_updatel(ethqos, RGMII_CONFIG_FUNC_CLK_EN, RGMII_CONFIG_FUNC_CLK_EN, RGMII_IO_MACRO_CONFIG); } @@ -277,6 +297,7 @@ static const struct ethqos_emac_driver_data emac_v4_0_0_data = { .has_emac_ge_3 = true, .link_clk_name = "phyaux", .has_integrated_pcs = true, + .needs_sgmii_loopback = true, .dma_addr_width = 36, .dwmac4_addrs = { .dma_chan = 0x00008100, @@ -674,6 +695,7 @@ static void ethqos_fix_mac_speed(void *priv, unsigned int speed, unsigned int mo { struct qcom_ethqos *ethqos = priv;
+ qcom_ethqos_set_sgmii_loopback(ethqos, false); ethqos->speed = speed; ethqos_update_link_clk(ethqos, speed); ethqos_configure(ethqos); @@ -809,6 +831,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev) ethqos->num_por = data->num_por; ethqos->rgmii_config_loopback_en = data->rgmii_config_loopback_en; ethqos->has_emac_ge_3 = data->has_emac_ge_3; + ethqos->needs_sgmii_loopback = data->needs_sgmii_loopback;
ethqos->link_clk = devm_clk_get(dev, data->link_clk_name ?: "rgmii"); if (IS_ERR(ethqos->link_clk)) diff --git a/drivers/net/pse-pd/tps23881.c b/drivers/net/pse-pd/tps23881.c index 98ffbb1bbf13..2d1c2e5706f8 100644 --- a/drivers/net/pse-pd/tps23881.c +++ b/drivers/net/pse-pd/tps23881.c @@ -5,6 +5,7 @@ * Copyright (c) 2023 Bootlin, Kory Maincent kory.maincent@bootlin.com */
+#include <linux/bitfield.h> #include <linux/delay.h> #include <linux/firmware.h> #include <linux/i2c.h> @@ -29,6 +30,8 @@ #define TPS23881_REG_TPON BIT(0) #define TPS23881_REG_FWREV 0x41 #define TPS23881_REG_DEVID 0x43 +#define TPS23881_REG_DEVID_MASK 0xF0 +#define TPS23881_DEVICE_ID 0x02 #define TPS23881_REG_SRAM_CTRL 0x60 #define TPS23881_REG_SRAM_DATA 0x61
@@ -750,7 +753,7 @@ static int tps23881_i2c_probe(struct i2c_client *client) if (ret < 0) return ret;
- if (ret != 0x22) { + if (FIELD_GET(TPS23881_REG_DEVID_MASK, ret) != TPS23881_DEVICE_ID) { dev_err(dev, "Wrong device ID\n"); return -ENXIO; } diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 386d62769ded..cfda32047cff 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -201,6 +201,7 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb) break; default: /* not ip - do not know what to do */ + kfree_skb(skbn); goto skip; }
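Back in the tps23881 hunk above: comparing the whole register against 0x22 rejected newer silicon revisions, so the fix extracts only the device-ID nibble with FIELD_GET() and ignores the revision nibble. What FIELD_GET() does, sketched with an assumed register value:

#include <linux/bitfield.h>

#define DEVID_MASK      0xF0    /* high nibble: device ID */

static bool id_matches(u8 reg)
{
        /* FIELD_GET() masks and shifts down: (0x24 & 0xF0) >> 4 == 0x2,
         * so a revision bump in the low nibble no longer fails the probe */
        return FIELD_GET(DEVID_MASK, reg) == 0x02;
}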
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 5161e7efda2c..f32e017b62e9 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -3257,7 +3257,11 @@ static int virtnet_set_ringparam(struct net_device *dev, err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, i, vi->intr_coal_tx.max_usecs, vi->intr_coal_tx.max_packets); - if (err) + + /* Don't break the tx resize action if the vq coalescing is not + * supported. The same is true for rx resize below. + */ + if (err && err != -EOPNOTSUPP) return err; }
@@ -3272,7 +3276,7 @@ static int virtnet_set_ringparam(struct net_device *dev, vi->intr_coal_rx.max_usecs, vi->intr_coal_rx.max_packets); mutex_unlock(&vi->rq[i].dim_lock); - if (err) + if (err && err != -EOPNOTSUPP) return err; } } diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c index 121f27284be5..1d287ed25a94 100644 --- a/drivers/net/wireless/ath/ath12k/dp_rx.c +++ b/drivers/net/wireless/ath/ath12k/dp_rx.c @@ -2793,6 +2793,7 @@ int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev peer = ath12k_peer_find(ab, vdev_id, peer_mac); if (!peer) { spin_unlock_bh(&ab->base_lock); + crypto_free_shash(tfm); ath12k_warn(ab, "failed to find the peer to set up fragment info\n"); return -ENOENT; } diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c index 55fde0d33183..f92b4ce49dfd 100644 --- a/drivers/net/wireless/ath/ath12k/pci.c +++ b/drivers/net/wireless/ath/ath12k/pci.c @@ -1091,14 +1091,14 @@ void ath12k_pci_ext_irq_enable(struct ath12k_base *ab) { int i;
- set_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags); - for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
napi_enable(&irq_grp->napi); ath12k_pci_ext_grp_enable(irq_grp); } + + set_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags); }
void ath12k_pci_ext_irq_disable(struct ath12k_base *ab) diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 2ea72d9e3957..4d2931e54427 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -23,6 +23,8 @@ MODULE_DESCRIPTION("USB basic driver for rtlwifi");
#define MAX_USBCTRL_VENDORREQ_TIMES 10
+static void _rtl_usb_cleanup_tx(struct ieee80211_hw *hw); + static void _usbctrl_vendorreq_sync(struct usb_device *udev, u8 reqtype, u16 value, void *pdata, u16 len) { @@ -285,9 +287,23 @@ static int _rtl_usb_init(struct ieee80211_hw *hw) } /* usb endpoint mapping */ err = rtlpriv->cfg->usb_interface_cfg->usb_endpoint_mapping(hw); - rtlusb->usb_mq_to_hwq = rtlpriv->cfg->usb_interface_cfg->usb_mq_to_hwq; - _rtl_usb_init_tx(hw); - _rtl_usb_init_rx(hw); + if (err) + return err; + + rtlusb->usb_mq_to_hwq = rtlpriv->cfg->usb_interface_cfg->usb_mq_to_hwq; + + err = _rtl_usb_init_tx(hw); + if (err) + return err; + + err = _rtl_usb_init_rx(hw); + if (err) + goto err_out; + + return 0; + +err_out: + _rtl_usb_cleanup_tx(hw); return err; }
@@ -691,17 +707,13 @@ static int rtl_usb_start(struct ieee80211_hw *hw) }
/*======================= tx =========================================*/ -static void rtl_usb_cleanup(struct ieee80211_hw *hw) +static void _rtl_usb_cleanup_tx(struct ieee80211_hw *hw) { u32 i; struct sk_buff *_skb; struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); struct ieee80211_tx_info *txinfo;
- /* clean up rx stuff. */ - _rtl_usb_cleanup_rx(hw); - - /* clean up tx stuff */ for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) { while ((_skb = skb_dequeue(&rtlusb->tx_skb_queue[i]))) { rtlusb->usb_tx_cleanup(hw, _skb); @@ -715,6 +727,12 @@ static void rtl_usb_cleanup(struct ieee80211_hw *hw) usb_kill_anchored_urbs(&rtlusb->tx_submitted); }
+static void rtl_usb_cleanup(struct ieee80211_hw *hw) +{ + _rtl_usb_cleanup_rx(hw); + _rtl_usb_cleanup_tx(hw); +} + /* We may add some struct into struct rtl_usb later. Do deinit here. */ static void rtl_usb_deinit(struct ieee80211_hw *hw) { diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index b36aa9a6bb3f..312b57d7da64 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -183,14 +183,17 @@ static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev, static void rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev, struct sk_buff *skb) { - struct rtw89_pci_rxbd_info *rxbd_info; struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb); + struct rtw89_pci_rxbd_info *rxbd_info; + __le32 info;
rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data; - rx_info->fs = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_FS); - rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS); - rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE); - rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG); + info = rxbd_info->dword; + + rx_info->fs = le32_get_bits(info, RTW89_PCI_RXBD_FS); + rx_info->ls = le32_get_bits(info, RTW89_PCI_RXBD_LS); + rx_info->len = le32_get_bits(info, RTW89_PCI_RXBD_WRITE_SIZE); + rx_info->tag = le32_get_bits(info, RTW89_PCI_RXBD_TAG); }
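The rtw89 hunk snapshots the descriptor word once into a local __le32 and decodes every field from that copy. The descriptor lives in a DMA buffer the device may still be writing, so re-reading rxbd_info->dword per field can mix bits from two different hardware updates. A sketch of the snapshot-then-decode pattern, with illustrative masks:

#include <linux/bits.h>
#include <linux/bitfield.h>

#define EXAMPLE_RXBD_FS		BIT(31)
#define EXAMPLE_RXBD_LS		BIT(30)
#define EXAMPLE_RXBD_LEN	GENMASK(13, 0)

static void example_decode_rxbd(const __le32 *desc,
				u32 *fs, u32 *ls, u32 *len)
{
	__le32 info = *desc;	/* single read of the DMA'd word */

	*fs  = le32_get_bits(info, EXAMPLE_RXBD_FS);
	*ls  = le32_get_bits(info, EXAMPLE_RXBD_LS);
	*len = le32_get_bits(info, EXAMPLE_RXBD_LEN);
}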
static int rtw89_pci_validate_rx_tag(struct rtw89_dev *rtwdev, diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c index 0cfa39361d3b..25ecc1a005c5 100644 --- a/drivers/nvme/host/apple.c +++ b/drivers/nvme/host/apple.c @@ -1388,7 +1388,7 @@ static void devm_apple_nvme_mempool_destroy(void *data) mempool_destroy(data); }
-static int apple_nvme_probe(struct platform_device *pdev) +static struct apple_nvme *apple_nvme_alloc(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct apple_nvme *anv; @@ -1396,7 +1396,7 @@ static int apple_nvme_probe(struct platform_device *pdev)
anv = devm_kzalloc(dev, sizeof(*anv), GFP_KERNEL); if (!anv) - return -ENOMEM; + return ERR_PTR(-ENOMEM);
anv->dev = get_device(dev); anv->adminq.is_adminq = true; @@ -1516,10 +1516,26 @@ static int apple_nvme_probe(struct platform_device *pdev) goto put_dev; }
+ return anv; +put_dev: + put_device(anv->dev); + return ERR_PTR(ret); +} + +static int apple_nvme_probe(struct platform_device *pdev) +{ + struct apple_nvme *anv; + int ret; + + anv = apple_nvme_alloc(pdev); + if (IS_ERR(anv)) + return PTR_ERR(anv); + anv->ctrl.admin_q = blk_mq_alloc_queue(&anv->admin_tagset, NULL, NULL); if (IS_ERR(anv->ctrl.admin_q)) { ret = -ENOMEM; - goto put_dev; + anv->ctrl.admin_q = NULL; + goto out_uninit_ctrl; }
nvme_reset_ctrl(&anv->ctrl); @@ -1527,8 +1543,9 @@ static int apple_nvme_probe(struct platform_device *pdev)
return 0;
-put_dev: - put_device(anv->dev); +out_uninit_ctrl: + nvme_uninit_ctrl(&anv->ctrl); + nvme_put_ctrl(&anv->ctrl); return ret; }
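The apple-nvme refactor above carves the allocation/setup half of probe into a helper that reports failure through the returned pointer, which is what lets probe itself unwind with nvme_uninit_ctrl()/nvme_put_ctrl() once the admin-queue step fails. The ERR_PTR()/IS_ERR()/PTR_ERR() trio is the stock kernel idiom for such helpers; a minimal sketch with a made-up foo device:

#include <linux/err.h>
#include <linux/slab.h>

struct foo { int id; };

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return ERR_PTR(-ENOMEM);	/* encode errno in the pointer */
	return f;
}

static int foo_probe(void)
{
	struct foo *f = foo_alloc();

	if (IS_ERR(f))
		return PTR_ERR(f);	/* decode the errno back out */
	/* ... continue with registration ... */
	return 0;
}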
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 5a93f021ca4f..7168ff4cc62b 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -826,9 +826,9 @@ static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req, struct nvme_command *cmnd) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct bio_vec bv = rq_integrity_vec(req);
- iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req), - rq_dma_dir(req), 0); + iod->meta_dma = dma_map_bvec(dev->dev, &bv, rq_dma_dir(req), 0); if (dma_mapping_error(dev->dev, iod->meta_dma)) return BLK_STS_IOERR; cmnd->rw.metadata = cpu_to_le64(iod->meta_dma); @@ -968,7 +968,7 @@ static __always_inline void nvme_pci_unmap_rq(struct request *req) struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
dma_unmap_page(dev->dev, iod->meta_dma, - rq_integrity_vec(req)->bv_len, rq_dma_dir(req)); + rq_integrity_vec(req).bv_len, rq_dma_dir(req)); }
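These two nvme-pci hunks track an API change: rq_integrity_vec() now returns a struct bio_vec by value instead of a pointer. Since taking the address of a function call's return value is not valid C, the value has to be materialized in a named local before it can be handed to dma_map_bvec(), and the ->bv_len dereference becomes a plain .bv_len member access. Reduced to its shape (both helpers below are assumed stand-ins):

#include <linux/bvec.h>

struct bio_vec example_get_vec(void);			/* by value */
dma_addr_t example_map_vec(const struct bio_vec *bv);	/* wants a pointer */

static dma_addr_t example_map(void)
{
	struct bio_vec bv = example_get_vec();	/* materialize the temporary */

	return example_map_vec(&bv);
}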
if (blk_rq_nr_phys_segments(req)) diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c index ddfbfec44f4c..43e0914256a3 100644 --- a/drivers/platform/chrome/cros_ec_lpc.c +++ b/drivers/platform/chrome/cros_ec_lpc.c @@ -39,6 +39,11 @@ static bool cros_ec_lpc_acpi_device_found; * be used as the base port for EC mapped memory. */ #define CROS_EC_LPC_QUIRK_REMAP_MEMORY BIT(0) +/* + * Indicates that lpc_driver_data.quirk_acpi_id should be used to find + * the ACPI device. + */ +#define CROS_EC_LPC_QUIRK_ACPI_ID BIT(1)
/** * struct lpc_driver_data - driver data attached to a DMI device ID to indicate @@ -46,10 +51,12 @@ static bool cros_ec_lpc_acpi_device_found; * @quirks: a bitfield composed of quirks from CROS_EC_LPC_QUIRK_* * @quirk_mmio_memory_base: The first I/O port addressing EC mapped memory (used * when quirk ...REMAP_MEMORY is set.) + * @quirk_acpi_id: An ACPI HID to be used to find the ACPI device. */ struct lpc_driver_data { u32 quirks; u16 quirk_mmio_memory_base; + const char *quirk_acpi_id; };
/** @@ -374,6 +381,26 @@ static void cros_ec_lpc_acpi_notify(acpi_handle device, u32 value, void *data) pm_system_wakeup(); }
+static acpi_status cros_ec_lpc_parse_device(acpi_handle handle, u32 level, + void *context, void **retval) +{ + *(struct acpi_device **)context = acpi_fetch_acpi_dev(handle); + return AE_CTRL_TERMINATE; +} + +static struct acpi_device *cros_ec_lpc_get_device(const char *id) +{ + struct acpi_device *adev = NULL; + acpi_status status = acpi_get_devices(id, cros_ec_lpc_parse_device, + &adev, NULL); + if (ACPI_FAILURE(status)) { + pr_warn(DRV_NAME ": Looking for %s failed\n", id); + return NULL; + } + + return adev; +} + static int cros_ec_lpc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -401,6 +428,16 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
if (quirks & CROS_EC_LPC_QUIRK_REMAP_MEMORY) ec_lpc->mmio_memory_base = driver_data->quirk_mmio_memory_base; + + if (quirks & CROS_EC_LPC_QUIRK_ACPI_ID) { + adev = cros_ec_lpc_get_device(driver_data->quirk_acpi_id); + if (!adev) { + dev_err(dev, "failed to get ACPI device '%s'\n", + driver_data->quirk_acpi_id); + return -ENODEV; + } + ACPI_COMPANION_SET(dev, adev); + } }
/* @@ -661,23 +698,12 @@ static struct platform_device cros_ec_lpc_device = { .name = DRV_NAME };
-static acpi_status cros_ec_lpc_parse_device(acpi_handle handle, u32 level, - void *context, void **retval) -{ - *(bool *)context = true; - return AE_CTRL_TERMINATE; -} - static int __init cros_ec_lpc_init(void) { int ret; - acpi_status status; const struct dmi_system_id *dmi_match;
- status = acpi_get_devices(ACPI_DRV_NAME, cros_ec_lpc_parse_device, - &cros_ec_lpc_acpi_device_found, NULL); - if (ACPI_FAILURE(status)) - pr_warn(DRV_NAME ": Looking for %s failed\n", ACPI_DRV_NAME); + cros_ec_lpc_acpi_device_found = !!cros_ec_lpc_get_device(ACPI_DRV_NAME);
dmi_match = dmi_first_match(cros_ec_lpc_dmi_table);
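The cros_ec_lpc rework above turns the old "found it" boolean callback into one that passes the matched acpi_device back through the context pointer, so a single acpi_get_devices() walk now serves both the init-time existence check and the new quirk_acpi_id lookup. The general shape of such a walk, for reference:

static acpi_status example_parse(acpi_handle handle, u32 level,
				 void *context, void **retval)
{
	*(struct acpi_device **)context = acpi_fetch_acpi_dev(handle);
	return AE_CTRL_TERMINATE;	/* stop after the first match */
}

static struct acpi_device *example_find_hid(const char *hid)
{
	struct acpi_device *adev = NULL;

	if (ACPI_FAILURE(acpi_get_devices(hid, example_parse, &adev, NULL)))
		return NULL;
	return adev;
}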
diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c index 282e4bfe30da..be3d51ed0e47 100644 --- a/drivers/platform/x86/intel/ifs/runtest.c +++ b/drivers/platform/x86/intel/ifs/runtest.c @@ -221,8 +221,8 @@ static int doscan(void *data) */ static void ifs_test_core(int cpu, struct device *dev) { + union ifs_status status = {}; union ifs_scan activate; - union ifs_status status; unsigned long timeout; struct ifs_data *ifsd; int to_start, to_stop; diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c index 9b7ce03ba085..a353e830b65f 100644 --- a/drivers/platform/x86/intel/vbtn.c +++ b/drivers/platform/x86/intel/vbtn.c @@ -7,11 +7,13 @@ */
#include <linux/acpi.h> +#include <linux/cleanup.h> #include <linux/dmi.h> #include <linux/input.h> #include <linux/input/sparse-keymap.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/suspend.h> #include "../dual_accel_detect.h" @@ -66,6 +68,7 @@ static const struct key_entry intel_vbtn_switchmap[] = { };
struct intel_vbtn_priv { + struct mutex mutex; /* Avoid notify_handler() racing with itself */ struct input_dev *buttons_dev; struct input_dev *switches_dev; bool dual_accel; @@ -155,6 +158,8 @@ static void notify_handler(acpi_handle handle, u32 event, void *context) bool autorelease; int ret;
+ guard(mutex)(&priv->mutex); + if ((ke = sparse_keymap_entry_from_scancode(priv->buttons_dev, event))) { if (!priv->has_buttons) { dev_warn(&device->dev, "Warning: received 0x%02x button event on a device without buttons, please report this.\n", @@ -290,6 +295,10 @@ static int intel_vbtn_probe(struct platform_device *device) return -ENOMEM; dev_set_drvdata(&device->dev, priv);
+ err = devm_mutex_init(&device->dev, &priv->mutex); + if (err) + return err; + priv->dual_accel = dual_accel; priv->has_buttons = has_buttons; priv->has_switches = has_switches; diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c index b5903193e2f9..ac05942e4e6a 100644 --- a/drivers/power/supply/axp288_charger.c +++ b/drivers/power/supply/axp288_charger.c @@ -178,18 +178,18 @@ static inline int axp288_charger_set_cv(struct axp288_chrg_info *info, int cv) u8 reg_val; int ret;
- if (cv <= CV_4100MV) { - reg_val = CHRG_CCCV_CV_4100MV; - cv = CV_4100MV; - } else if (cv <= CV_4150MV) { - reg_val = CHRG_CCCV_CV_4150MV; - cv = CV_4150MV; - } else if (cv <= CV_4200MV) { + if (cv >= CV_4350MV) { + reg_val = CHRG_CCCV_CV_4350MV; + cv = CV_4350MV; + } else if (cv >= CV_4200MV) { reg_val = CHRG_CCCV_CV_4200MV; cv = CV_4200MV; + } else if (cv >= CV_4150MV) { + reg_val = CHRG_CCCV_CV_4150MV; + cv = CV_4150MV; } else { - reg_val = CHRG_CCCV_CV_4350MV; - cv = CV_4350MV; + reg_val = CHRG_CCCV_CV_4100MV; + cv = CV_4100MV; }
reg_val = reg_val << CHRG_CCCV_CV_BIT_POS; @@ -337,8 +337,8 @@ static int axp288_charger_usb_set_property(struct power_supply *psy, } break; case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: - scaled_val = min(val->intval, info->max_cv); - scaled_val = DIV_ROUND_CLOSEST(scaled_val, 1000); + scaled_val = DIV_ROUND_CLOSEST(val->intval, 1000); + scaled_val = min(scaled_val, info->max_cv); ret = axp288_charger_set_cv(info, scaled_val); if (ret < 0) { dev_warn(&info->pdev->dev, "set charge voltage failed\n"); diff --git a/drivers/power/supply/qcom_battmgr.c b/drivers/power/supply/qcom_battmgr.c index ec163d1bcd18..44c6301f5f17 100644 --- a/drivers/power/supply/qcom_battmgr.c +++ b/drivers/power/supply/qcom_battmgr.c @@ -486,7 +486,7 @@ static int qcom_battmgr_bat_get_property(struct power_supply *psy, int ret;
if (!battmgr->service_up) - return -ENODEV; + return -EAGAIN;
if (battmgr->variant == QCOM_BATTMGR_SC8280XP) ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp); @@ -683,7 +683,7 @@ static int qcom_battmgr_ac_get_property(struct power_supply *psy, int ret;
if (!battmgr->service_up) - return -ENODEV; + return -EAGAIN;
ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp); if (ret) @@ -748,7 +748,7 @@ static int qcom_battmgr_usb_get_property(struct power_supply *psy, int ret;
if (!battmgr->service_up) - return -ENODEV; + return -EAGAIN;
if (battmgr->variant == QCOM_BATTMGR_SC8280XP) ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp); @@ -867,7 +867,7 @@ static int qcom_battmgr_wls_get_property(struct power_supply *psy, int ret;
if (!battmgr->service_up) - return -ENODEV; + return -EAGAIN;
if (battmgr->variant == QCOM_BATTMGR_SC8280XP) ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp); diff --git a/drivers/power/supply/rt5033_battery.c b/drivers/power/supply/rt5033_battery.c index 32eafe2c00af..7a27b262fb84 100644 --- a/drivers/power/supply/rt5033_battery.c +++ b/drivers/power/supply/rt5033_battery.c @@ -159,6 +159,7 @@ static int rt5033_battery_probe(struct i2c_client *client) return -EINVAL; }
+ i2c_set_clientdata(client, battery); psy_cfg.of_node = client->dev.of_node; psy_cfg.drv_data = battery;
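The one-line rt5033 fix matters because i2c_set_clientdata() is what later i2c_get_clientdata() callers rely on; the call appears to have been lost in an earlier cleanup, leaving those callers a NULL pointer. The pairing, sketched with an assumed example driver struct:

static int example_probe(struct i2c_client *client)
{
	struct example *e = devm_kzalloc(&client->dev, sizeof(*e), GFP_KERNEL);

	if (!e)
		return -ENOMEM;

	/* Must happen before anything that can call i2c_get_clientdata(). */
	i2c_set_clientdata(client, e);
	return 0;
}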
diff --git a/drivers/s390/char/sclp_sd.c b/drivers/s390/char/sclp_sd.c index f9e164be7568..944e75beb160 100644 --- a/drivers/s390/char/sclp_sd.c +++ b/drivers/s390/char/sclp_sd.c @@ -320,8 +320,14 @@ static int sclp_sd_store_data(struct sclp_sd_data *result, u8 di) &esize); if (rc) { /* Cancel running request if interrupted */ - if (rc == -ERESTARTSYS) - sclp_sd_sync(page, SD_EQ_HALT, di, 0, 0, NULL, NULL); + if (rc == -ERESTARTSYS) { + if (sclp_sd_sync(page, SD_EQ_HALT, di, 0, 0, NULL, NULL)) { + pr_warn("Could not stop Store Data request - leaking at least %zu bytes\n", + (size_t)dsize * PAGE_SIZE); + data = NULL; + asce = 0; + } + } vfree(data); goto out; } diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c index bce639a6cca1..c051692395cc 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_os.c +++ b/drivers/scsi/mpi3mr/mpi3mr_os.c @@ -3453,6 +3453,17 @@ static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc, scmd->sc_data_direction); priv->meta_sg_valid = 1; /* To unmap meta sg DMA */ } else { + /* + * Some firmware versions byte-swap the REPORT ZONES command + * reply from ATA-ZAC devices by accessing it directly in the + * host buffer. This does not respect the default command DMA + * direction and causes IOMMU page faults on some architectures + * with an IOMMU enforcing write mappings (e.g. AMD hosts). + * Avoid this issue by making the REPORT ZONES buffer mapping + * bi-directional. + */ + if (scmd->cmnd[0] == ZBC_IN && scmd->cmnd[1] == ZI_REPORT_ZONES) + scmd->sc_data_direction = DMA_BIDIRECTIONAL; sg_scmd = scsi_sglist(scmd); sges_left = scsi_dma_map(scmd); } diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index b2bcf4a27ddc..b785a7e88b49 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -2671,6 +2671,22 @@ _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr) _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1); }
+static inline int _base_scsi_dma_map(struct scsi_cmnd *cmd) +{ + /* + * Some firmware versions byte-swap the REPORT ZONES command reply from + * ATA-ZAC devices by accessing it directly in the host buffer. This + * does not respect the default command DMA direction and causes IOMMU + * page faults on some architectures with an IOMMU enforcing write + * mappings (e.g. AMD hosts). Avoid this issue by making the report + * zones buffer mapping bi-directional. + */ + if (cmd->cmnd[0] == ZBC_IN && cmd->cmnd[1] == ZI_REPORT_ZONES) + cmd->sc_data_direction = DMA_BIDIRECTIONAL; + + return scsi_dma_map(cmd); +} + /** * _base_build_sg_scmd - main sg creation routine pcie_device is unused here! @@ -2717,7 +2733,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc, sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
sg_scmd = scsi_sglist(scmd); - sges_left = scsi_dma_map(scmd); + sges_left = _base_scsi_dma_map(scmd); if (sges_left < 0) return -ENOMEM;
@@ -2861,7 +2877,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc, }
sg_scmd = scsi_sglist(scmd); - sges_left = scsi_dma_map(scmd); + sges_left = _base_scsi_dma_map(scmd); if (sges_left < 0) return -ENOMEM;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 1b7561abe05d..6b64af7d4927 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -4119,6 +4119,8 @@ static int sd_resume(struct device *dev) { struct scsi_disk *sdkp = dev_get_drvdata(dev);
+ sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); + if (opal_unlock_from_suspend(sdkp->opal_dev)) { sd_printk(KERN_NOTICE, sdkp, "OPAL unlock failed\n"); return -EIO; @@ -4135,13 +4137,12 @@ static int sd_resume_common(struct device *dev, bool runtime) if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */ return 0;
- sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); - if (!sd_do_start_stop(sdkp->device, runtime)) { sdkp->suspended = false; return 0; }
+ sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); ret = sd_start_stop_device(sdkp, 1); if (!ret) { sd_resume(dev); diff --git a/drivers/soc/qcom/icc-bwmon.c b/drivers/soc/qcom/icc-bwmon.c index ecddb60bd665..e7851974084b 100644 --- a/drivers/soc/qcom/icc-bwmon.c +++ b/drivers/soc/qcom/icc-bwmon.c @@ -783,9 +783,14 @@ static int bwmon_probe(struct platform_device *pdev) bwmon->dev = dev;
bwmon_disable(bwmon); - ret = devm_request_threaded_irq(dev, bwmon->irq, bwmon_intr, - bwmon_intr_thread, - IRQF_ONESHOT, dev_name(dev), bwmon); + + /* + * SoCs with multiple cpu-bwmon instances can end up using a shared interrupt + * line. Using the devm_ variant might result in the IRQ handler being executed + * after bwmon_disable in bwmon_remove(). + */ + ret = request_threaded_irq(bwmon->irq, bwmon_intr, bwmon_intr_thread, + IRQF_ONESHOT | IRQF_SHARED, dev_name(dev), bwmon); if (ret) return dev_err_probe(dev, ret, "failed to request IRQ\n");
@@ -800,6 +805,7 @@ static void bwmon_remove(struct platform_device *pdev) struct icc_bwmon *bwmon = platform_get_drvdata(pdev);
bwmon_disable(bwmon); + free_irq(bwmon->irq, bwmon); }
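The bwmon hunks above drop the devm_ IRQ helper because devm resources are released only after remove() returns: on a line shared between cpu-bwmon instances, another instance could raise the interrupt after bwmon_disable() but before the devm teardown, running the handler against disabled hardware. Managing the IRQ by hand bounds that window explicitly. Condensed shape, with assumed handlers:

static int example_probe(struct device *dev, struct example *e)
{
	/* IRQF_SHARED: several instances may sit on one interrupt line. */
	return request_threaded_irq(e->irq, example_hardirq, example_thread,
				    IRQF_ONESHOT | IRQF_SHARED,
				    dev_name(dev), e);
}

static void example_remove(struct example *e)
{
	example_disable_hw(e);	/* assumed register-level disable */
	free_irq(e->irq, e);	/* no handler invocation past this point */
}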
static const struct icc_bwmon_data msm8998_bwmon_data = { diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index aa5ed254be46..f2d7eedd324b 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ b/drivers/spi/spi-fsl-lpspi.c @@ -296,7 +296,7 @@ static void fsl_lpspi_set_watermark(struct fsl_lpspi_data *fsl_lpspi) static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi) { struct lpspi_config config = fsl_lpspi->config; - unsigned int perclk_rate, scldiv; + unsigned int perclk_rate, scldiv, div; u8 prescale;
perclk_rate = clk_get_rate(fsl_lpspi->clk_per); @@ -313,8 +313,10 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi) return -EINVAL; }
+ div = DIV_ROUND_UP(perclk_rate, config.speed_hz); + for (prescale = 0; prescale < 8; prescale++) { - scldiv = perclk_rate / config.speed_hz / (1 << prescale) - 2; + scldiv = div / (1 << prescale) - 2; if (scldiv < 256) { fsl_lpspi->config.prescale = prescale; break; diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 05e6d007f9a7..5304728c68c2 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -700,6 +700,7 @@ static const struct class spidev_class = { };
static const struct spi_device_id spidev_spi_ids[] = { + { .name = "bh2228fv" }, { .name = "dh2228fv" }, { .name = "ltc2488" }, { .name = "sx1301" }, diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c index 791cdc160c51..f36f57bde077 100644 --- a/drivers/spmi/spmi-pmic-arb.c +++ b/drivers/spmi/spmi-pmic-arb.c @@ -398,7 +398,7 @@ static int pmic_arb_fmt_read_cmd(struct spmi_pmic_arb_bus *bus, u8 opc, u8 sid,
*offset = rc; if (bc >= PMIC_ARB_MAX_TRANS_BYTES) { - dev_err(&bus->spmic->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested", + dev_err(&bus->spmic->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested\n", PMIC_ARB_MAX_TRANS_BYTES, len); return -EINVAL; } @@ -477,7 +477,7 @@ static int pmic_arb_fmt_write_cmd(struct spmi_pmic_arb_bus *bus, u8 opc,
*offset = rc; if (bc >= PMIC_ARB_MAX_TRANS_BYTES) { - dev_err(&bus->spmic->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested", + dev_err(&bus->spmic->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested\n", PMIC_ARB_MAX_TRANS_BYTES, len); return -EINVAL; } @@ -1702,7 +1702,7 @@ static int spmi_pmic_arb_bus_init(struct platform_device *pdev,
index = of_property_match_string(node, "reg-names", "cnfg"); if (index < 0) { - dev_err(dev, "cnfg reg region missing"); + dev_err(dev, "cnfg reg region missing\n"); return -EINVAL; }
@@ -1712,7 +1712,7 @@ static int spmi_pmic_arb_bus_init(struct platform_device *pdev,
index = of_property_match_string(node, "reg-names", "intr"); if (index < 0) { - dev_err(dev, "intr reg region missing"); + dev_err(dev, "intr reg region missing\n"); return -EINVAL; }
@@ -1737,8 +1737,7 @@ static int spmi_pmic_arb_bus_init(struct platform_device *pdev,
dev_dbg(&pdev->dev, "adding irq domain for bus %d\n", bus_index);
- bus->domain = irq_domain_add_tree(dev->of_node, - &pmic_arb_irq_domain_ops, bus); + bus->domain = irq_domain_add_tree(node, &pmic_arb_irq_domain_ops, bus); if (!bus->domain) { dev_err(&pdev->dev, "unable to create irq_domain\n"); return -ENOMEM; diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c index a180a98bb9f1..5b18a46a10b0 100644 --- a/drivers/thermal/intel/intel_hfi.c +++ b/drivers/thermal/intel/intel_hfi.c @@ -401,10 +401,10 @@ static void hfi_disable(void) * intel_hfi_online() - Enable HFI on @cpu * @cpu: CPU in which the HFI will be enabled * - * Enable the HFI to be used in @cpu. The HFI is enabled at the die/package - * level. The first CPU in the die/package to come online does the full HFI + * Enable the HFI to be used in @cpu. The HFI is enabled at the package + * level. The first CPU in the package to come online does the full HFI * initialization. Subsequent CPUs will just link themselves to the HFI - * instance of their die/package. + * instance of their package. * * This function is called before enabling the thermal vector in the local APIC * in order to ensure that @cpu has an associated HFI instance when it receives @@ -414,31 +414,31 @@ void intel_hfi_online(unsigned int cpu) { struct hfi_instance *hfi_instance; struct hfi_cpu_info *info; - u16 die_id; + u16 pkg_id;
/* Nothing to do if hfi_instances are missing. */ if (!hfi_instances) return;
/* - * Link @cpu to the HFI instance of its package/die. It does not + * Link @cpu to the HFI instance of its package. It does not * matter whether the instance has been initialized. */ info = &per_cpu(hfi_cpu_info, cpu); - die_id = topology_logical_die_id(cpu); + pkg_id = topology_logical_package_id(cpu); hfi_instance = info->hfi_instance; if (!hfi_instance) { - if (die_id >= max_hfi_instances) + if (pkg_id >= max_hfi_instances) return;
- hfi_instance = &hfi_instances[die_id]; + hfi_instance = &hfi_instances[pkg_id]; info->hfi_instance = hfi_instance; }
init_hfi_cpu_index(info);
/* - * Now check if the HFI instance of the package/die of @cpu has been + * Now check if the HFI instance of the package of @cpu has been * initialized (by checking its header). In such case, all we have to * do is to add @cpu to this instance's cpumask and enable the instance * if needed. @@ -504,7 +504,7 @@ void intel_hfi_online(unsigned int cpu) * * On some processors, hardware remembers previous programming settings even * after being reprogrammed. Thus, keep HFI enabled even if all CPUs in the - * die/package of @cpu are offline. See note in intel_hfi_online(). + * package of @cpu are offline. See note in intel_hfi_online(). */ void intel_hfi_offline(unsigned int cpu) { @@ -674,9 +674,13 @@ void __init intel_hfi_init(void) if (hfi_parse_features()) return;
- /* There is one HFI instance per die/package. */ - max_hfi_instances = topology_max_packages() * - topology_max_dies_per_package(); + /* + * Note: HFI resources are managed at the physical package scope. + * There could be platforms that enumerate packages as Linux dies. + * Special handling would be needed if this happens on an HFI-capable + * platform. + */ + max_hfi_instances = topology_max_packages();
/* * This allocation may fail. CPU hotplug callbacks must check diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index bf0065d1c8e9..9547aa8f4520 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -326,6 +326,7 @@ struct sc16is7xx_one { struct kthread_work reg_work; struct kthread_delayed_work ms_work; struct sc16is7xx_one_config config; + unsigned char buf[SC16IS7XX_FIFO_SIZE]; /* Rx buffer. */ unsigned int old_mctrl; u8 old_lcr; /* Value before EFR access. */ bool irda_mode; @@ -339,7 +340,6 @@ struct sc16is7xx_port { unsigned long gpio_valid_mask; #endif u8 mctrl_mask; - unsigned char buf[SC16IS7XX_FIFO_SIZE]; struct kthread_worker kworker; struct task_struct *kworker_task; struct sc16is7xx_one p[]; @@ -591,6 +591,8 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud) SC16IS7XX_MCR_CLKSEL_BIT, prescaler == 1 ? 0 : SC16IS7XX_MCR_CLKSEL_BIT);
+ mutex_lock(&one->efr_lock); + /* Backup LCR and access special register set (DLL/DLH) */ lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG); sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, @@ -605,24 +607,26 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud) /* Restore LCR and access to general register set */ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
+ mutex_unlock(&one->efr_lock); + return DIV_ROUND_CLOSEST((clk / prescaler) / 16, div); }
static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen, unsigned int iir) { - struct sc16is7xx_port *s = dev_get_drvdata(port->dev); + struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); unsigned int lsr = 0, bytes_read, i; bool read_lsr = (iir == SC16IS7XX_IIR_RLSE_SRC) ? true : false; u8 ch, flag;
- if (unlikely(rxlen >= sizeof(s->buf))) { + if (unlikely(rxlen >= sizeof(one->buf))) { dev_warn_ratelimited(port->dev, "ttySC%i: Possible RX FIFO overrun: %d\n", port->line, rxlen); port->icount.buf_overrun++; /* Ensure sanity of RX level */ - rxlen = sizeof(s->buf); + rxlen = sizeof(one->buf); }
while (rxlen) { @@ -635,10 +639,10 @@ static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen, lsr = 0;
if (read_lsr) { - s->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG); + one->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG); bytes_read = 1; } else { - sc16is7xx_fifo_read(port, s->buf, rxlen); + sc16is7xx_fifo_read(port, one->buf, rxlen); bytes_read = rxlen; }
@@ -671,7 +675,7 @@ static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen, }
for (i = 0; i < bytes_read; ++i) { - ch = s->buf[i]; + ch = one->buf[i]; if (uart_handle_sysrq_char(port, ch)) continue;
@@ -689,10 +693,10 @@ static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen,
static void sc16is7xx_handle_tx(struct uart_port *port) { - struct sc16is7xx_port *s = dev_get_drvdata(port->dev); struct tty_port *tport = &port->state->port; unsigned long flags; unsigned int txlen; + unsigned char *tail;
if (unlikely(port->x_char)) { sc16is7xx_port_write(port, SC16IS7XX_THR_REG, port->x_char); @@ -717,8 +721,9 @@ static void sc16is7xx_handle_tx(struct uart_port *port) txlen = 0; }
- txlen = uart_fifo_out(port, s->buf, txlen); - sc16is7xx_fifo_write(port, s->buf, txlen); + txlen = kfifo_out_linear_ptr(&tport->xmit_fifo, &tail, txlen); + sc16is7xx_fifo_write(port, tail, txlen); + uart_xmit_advance(port, txlen);
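The sc16is7xx TX rewrite above stops round-tripping data through the driver's own buffer: kfifo_out_linear_ptr() lends a pointer into the xmit kfifo's contiguous tail region without dequeuing, the bytes are written to the hardware FIFO straight from the ring, and uart_xmit_advance() then consumes and accounts them. A sketch of the idiom (the hardware write helper is assumed):

static void example_tx(struct uart_port *port, struct tty_port *tport,
		       unsigned int txlen)
{
	unsigned char *tail;
	unsigned int len;

	/* Borrow up to txlen contiguous bytes; nothing is consumed yet. */
	len = kfifo_out_linear_ptr(&tport->xmit_fifo, &tail, txlen);
	example_write_hw_fifo(port, tail, len);	/* assumed */
	uart_xmit_advance(port, len);	/* now dequeue and update counters */
}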
uart_port_lock_irqsave(port, &flags); if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 2a8006e3d687..9967444eae10 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -881,6 +881,14 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port, new_flags = (__force upf_t)new_info->flags; old_custom_divisor = uport->custom_divisor;
+ if (!(uport->flags & UPF_FIXED_PORT)) { + unsigned int uartclk = new_info->baud_base * 16; + /* this check needs to be done here before other settings are made */ + if (uartclk == 0) { + retval = -EINVAL; + goto exit; + } + } if (!capable(CAP_SYS_ADMIN)) { retval = -EPERM; if (change_irq || change_port || diff --git a/drivers/tty/vt/conmakehash.c b/drivers/tty/vt/conmakehash.c index dc2177fec715..82d9db68b2ce 100644 --- a/drivers/tty/vt/conmakehash.c +++ b/drivers/tty/vt/conmakehash.c @@ -11,6 +11,8 @@ * Copyright (C) 1995-1997 H. Peter Anvin */
+#include <libgen.h> +#include <linux/limits.h> #include <stdio.h> #include <stdlib.h> #include <sysexits.h> @@ -76,8 +78,8 @@ static void addpair(int fp, int un) int main(int argc, char *argv[]) { FILE *ctbl; - const char *tblname, *rel_tblname; - const char *abs_srctree; + const char *tblname; + char base_tblname[PATH_MAX]; char buffer[65536]; int fontlen; int i, nuni, nent; @@ -102,16 +104,6 @@ int main(int argc, char *argv[]) } }
- abs_srctree = getenv("abs_srctree"); - if (abs_srctree && !strncmp(abs_srctree, tblname, strlen(abs_srctree))) - { - rel_tblname = tblname + strlen(abs_srctree); - while (*rel_tblname == '/') - ++rel_tblname; - } - else - rel_tblname = tblname; - /* For now we assume the default font is always 256 characters. */ fontlen = 256;
@@ -253,6 +245,8 @@ int main(int argc, char *argv[]) for ( i = 0 ; i < fontlen ; i++ ) nuni += unicount[i];
+ strncpy(base_tblname, tblname, PATH_MAX); + base_tblname[PATH_MAX - 1] = 0; printf("\ /*\n\ * Do not edit this file; it was automatically generated by\n\ @@ -264,7 +258,7 @@ int main(int argc, char *argv[]) #include <linux/types.h>\n\ \n\ u8 dfont_unicount[%d] = \n\ -{\n\t", rel_tblname, fontlen); +{\n\t", basename(base_tblname), fontlen);
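conmakehash now prints just the basename of the table file rather than a path made relative to $abs_srctree, which keeps the generated comment reproducible across build setups. The strncpy() into a PATH_MAX buffer is not incidental: POSIX basename() is allowed to modify the string it is handed, so a caller must never pass a const or shared path directly. The defensive shape in userspace C:

#include <libgen.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>

static void print_base(const char *path)
{
	char buf[PATH_MAX];

	/* basename() may scribble on its argument: work on a copy. */
	strncpy(buf, path, sizeof(buf));
	buf[sizeof(buf) - 1] = '\0';
	printf("%s\n", basename(buf));
}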
for ( i = 0 ; i < fontlen ; i++ ) { diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h index f42d99ce5bf1..81d6f0cfb148 100644 --- a/drivers/ufs/core/ufshcd-priv.h +++ b/drivers/ufs/core/ufshcd-priv.h @@ -329,6 +329,11 @@ static inline int ufshcd_rpm_get_sync(struct ufs_hba *hba) return pm_runtime_get_sync(&hba->ufs_device_wlun->sdev_gendev); }
+static inline int ufshcd_rpm_get_if_active(struct ufs_hba *hba) +{ + return pm_runtime_get_if_active(&hba->ufs_device_wlun->sdev_gendev); +} + static inline int ufshcd_rpm_put_sync(struct ufs_hba *hba) { return pm_runtime_put_sync(&hba->ufs_device_wlun->sdev_gendev); diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 46433ecf0c4d..5864d65448ce 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -4086,11 +4086,16 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba) min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US - delta; else - return; /* no more delay required */ + min_sleep_time_us = 0; /* no more delay required */ }
- /* allow sleep for extra 50us if needed */ - usleep_range(min_sleep_time_us, min_sleep_time_us + 50); + if (min_sleep_time_us > 0) { + /* allow sleep for extra 50us if needed */ + usleep_range(min_sleep_time_us, min_sleep_time_us + 50); + } + + /* update the last_dme_cmd_tstamp */ + hba->last_dme_cmd_tstamp = ktime_get(); }
/** @@ -8171,7 +8176,10 @@ static void ufshcd_update_rtc(struct ufs_hba *hba) */ val = ts64.tv_sec - hba->dev_info.rtc_time_baseline;
- ufshcd_rpm_get_sync(hba); + /* Skip updating the RTC if the RPM state is not RPM_ACTIVE */ + if (ufshcd_rpm_get_if_active(hba) <= 0) + return; + err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, QUERY_ATTR_IDN_SECONDS_PASSED, 0, 0, &val); ufshcd_rpm_put_sync(hba); @@ -10220,9 +10228,6 @@ int ufshcd_system_restore(struct device *dev) */ ufshcd_readl(hba, REG_UTP_TASK_REQ_LIST_BASE_H);
- /* Resuming from hibernate, assume that link was OFF */ - ufshcd_set_link_off(hba); - return 0;
} diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 1f21459b1188..a7bc715bcafa 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -3731,10 +3731,10 @@ static int ffs_func_set_alt(struct usb_function *f, struct ffs_data *ffs = func->ffs; int ret = 0, intf;
- if (alt > MAX_ALT_SETTINGS) - return -EINVAL; - if (alt != (unsigned)-1) { + if (alt > MAX_ALT_SETTINGS) + return -EINVAL; + intf = ffs_func_revmap_intf(func, interface); if (intf < 0) return intf; diff --git a/drivers/usb/gadget/function/f_midi2.c b/drivers/usb/gadget/function/f_midi2.c index 0e38bb145e8f..6908fdd4a83f 100644 --- a/drivers/usb/gadget/function/f_midi2.c +++ b/drivers/usb/gadget/function/f_midi2.c @@ -642,12 +642,21 @@ static void process_ump_stream_msg(struct f_midi2_ep *ep, const u32 *data) if (format) return; // invalid blk = (*data >> 8) & 0xff; - if (blk >= ep->num_blks) - return; - if (*data & UMP_STREAM_MSG_REQUEST_FB_INFO) - reply_ump_stream_fb_info(ep, blk); - if (*data & UMP_STREAM_MSG_REQUEST_FB_NAME) - reply_ump_stream_fb_name(ep, blk); + if (blk == 0xff) { + /* inquiry for all blocks */ + for (blk = 0; blk < ep->num_blks; blk++) { + if (*data & UMP_STREAM_MSG_REQUEST_FB_INFO) + reply_ump_stream_fb_info(ep, blk); + if (*data & UMP_STREAM_MSG_REQUEST_FB_NAME) + reply_ump_stream_fb_name(ep, blk); + } + } else if (blk < ep->num_blks) { + /* only the specified block */ + if (*data & UMP_STREAM_MSG_REQUEST_FB_INFO) + reply_ump_stream_fb_info(ep, blk); + if (*data & UMP_STREAM_MSG_REQUEST_FB_NAME) + reply_ump_stream_fb_name(ep, blk); + } return; } } diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c index 89af0feb7512..24299576972f 100644 --- a/drivers/usb/gadget/function/u_audio.c +++ b/drivers/usb/gadget/function/u_audio.c @@ -592,16 +592,25 @@ int u_audio_start_capture(struct g_audio *audio_dev) struct usb_ep *ep, *ep_fback; struct uac_rtd_params *prm; struct uac_params *params = &audio_dev->params; - int req_len, i; + int req_len, i, ret;
prm = &uac->c_prm; dev_dbg(dev, "start capture with rate %d\n", prm->srate); ep = audio_dev->out_ep; - config_ep_by_speed(gadget, &audio_dev->func, ep); + ret = config_ep_by_speed(gadget, &audio_dev->func, ep); + if (ret < 0) { + dev_err(dev, "config_ep_by_speed for out_ep failed (%d)\n", ret); + return ret; + } + req_len = ep->maxpacket;
prm->ep_enabled = true; - usb_ep_enable(ep); + ret = usb_ep_enable(ep); + if (ret < 0) { + dev_err(dev, "usb_ep_enable failed for out_ep (%d)\n", ret); + return ret; + }
for (i = 0; i < params->req_number; i++) { if (!prm->reqs[i]) { @@ -629,9 +638,18 @@ int u_audio_start_capture(struct g_audio *audio_dev) return 0;
/* Setup feedback endpoint */ - config_ep_by_speed(gadget, &audio_dev->func, ep_fback); + ret = config_ep_by_speed(gadget, &audio_dev->func, ep_fback); + if (ret < 0) { + dev_err(dev, "config_ep_by_speed in_ep_fback failed (%d)\n", ret); + return ret; // TODO: Clean up out_ep + } + prm->fb_ep_enabled = true; - usb_ep_enable(ep_fback); + ret = usb_ep_enable(ep_fback); + if (ret < 0) { + dev_err(dev, "usb_ep_enable failed for in_ep_fback (%d)\n", ret); + return ret; // TODO: Clean up out_ep + } req_len = ep_fback->maxpacket;
req_fback = usb_ep_alloc_request(ep_fback, GFP_ATOMIC); @@ -687,13 +705,17 @@ int u_audio_start_playback(struct g_audio *audio_dev) struct uac_params *params = &audio_dev->params; unsigned int factor; const struct usb_endpoint_descriptor *ep_desc; - int req_len, i; + int req_len, i, ret; unsigned int p_pktsize;
prm = &uac->p_prm; dev_dbg(dev, "start playback with rate %d\n", prm->srate); ep = audio_dev->in_ep; - config_ep_by_speed(gadget, &audio_dev->func, ep); + ret = config_ep_by_speed(gadget, &audio_dev->func, ep); + if (ret < 0) { + dev_err(dev, "config_ep_by_speed for in_ep failed (%d)\n", ret); + return ret; + }
ep_desc = ep->desc; /* @@ -720,7 +742,11 @@ int u_audio_start_playback(struct g_audio *audio_dev) uac->p_residue_mil = 0;
prm->ep_enabled = true; - usb_ep_enable(ep); + ret = usb_ep_enable(ep); + if (ret < 0) { + dev_err(dev, "usb_ep_enable failed for in_ep (%d)\n", ret); + return ret; + }
for (i = 0; i < params->req_number; i++) { if (!prm->reqs[i]) { diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c index a92eb6d90976..8962f96ae729 100644 --- a/drivers/usb/gadget/function/u_serial.c +++ b/drivers/usb/gadget/function/u_serial.c @@ -1441,6 +1441,7 @@ void gserial_suspend(struct gserial *gser) spin_lock(&port->port_lock); spin_unlock(&serial_port_lock); port->suspended = true; + port->start_delayed = true; spin_unlock_irqrestore(&port->port_lock, flags); } EXPORT_SYMBOL_GPL(gserial_suspend); diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index 2dfae7a17b3f..81f9140f3681 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -118,12 +118,10 @@ int usb_ep_enable(struct usb_ep *ep) goto out;
/* UDC drivers can't handle endpoints with maxpacket size 0 */ - if (usb_endpoint_maxp(ep->desc) == 0) { - /* - * We should log an error message here, but we can't call - * dev_err() because there's no way to find the gadget - * given only ep. - */ + if (!ep->desc || usb_endpoint_maxp(ep->desc) == 0) { + WARN_ONCE(1, "%s: ep%d (%s) has %s\n", __func__, ep->address, ep->name, + (!ep->desc) ? "NULL descriptor" : "maxpacket 0"); + ret = -EINVAL; goto out; } diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c index 6934970f180d..5a8869cd95d5 100644 --- a/drivers/usb/serial/usb_debug.c +++ b/drivers/usb/serial/usb_debug.c @@ -76,6 +76,11 @@ static void usb_debug_process_read_urb(struct urb *urb) usb_serial_generic_process_read_urb(urb); }
+static void usb_debug_init_termios(struct tty_struct *tty) +{ + tty->termios.c_lflag &= ~(ECHO | ECHONL); +} + static struct usb_serial_driver debug_device = { .driver = { .owner = THIS_MODULE, @@ -85,6 +90,7 @@ static struct usb_serial_driver debug_device = { .num_ports = 1, .bulk_out_size = USB_DEBUG_MAX_PACKET_SIZE, .break_ctl = usb_debug_break_ctl, + .init_termios = usb_debug_init_termios, .process_read_urb = usb_debug_process_read_urb, };
@@ -96,6 +102,7 @@ static struct usb_serial_driver dbc_device = { .id_table = dbc_id_table, .num_ports = 1, .break_ctl = usb_debug_break_ctl, + .init_termios = usb_debug_init_termios, .process_read_urb = usb_debug_process_read_urb, };
diff --git a/drivers/usb/typec/mux/fsa4480.c b/drivers/usb/typec/mux/fsa4480.c index cb7cdf90cb0a..cd235339834b 100644 --- a/drivers/usb/typec/mux/fsa4480.c +++ b/drivers/usb/typec/mux/fsa4480.c @@ -13,6 +13,10 @@ #include <linux/usb/typec_dp.h> #include <linux/usb/typec_mux.h>
+#define FSA4480_DEVICE_ID 0x00 + #define FSA4480_DEVICE_ID_VENDOR_ID GENMASK(7, 6) + #define FSA4480_DEVICE_ID_VERSION_ID GENMASK(5, 3) + #define FSA4480_DEVICE_ID_REV_ID GENMASK(2, 0) #define FSA4480_SWITCH_ENABLE 0x04 #define FSA4480_SWITCH_SELECT 0x05 #define FSA4480_SWITCH_STATUS1 0x07 @@ -251,6 +255,7 @@ static int fsa4480_probe(struct i2c_client *client) struct typec_switch_desc sw_desc = { }; struct typec_mux_desc mux_desc = { }; struct fsa4480 *fsa; + int val = 0; int ret;
fsa = devm_kzalloc(dev, sizeof(*fsa), GFP_KERNEL); @@ -268,6 +273,15 @@ static int fsa4480_probe(struct i2c_client *client) if (IS_ERR(fsa->regmap)) return dev_err_probe(dev, PTR_ERR(fsa->regmap), "failed to initialize regmap\n");
+ ret = regmap_read(fsa->regmap, FSA4480_DEVICE_ID, &val); + if (ret || !val) + return dev_err_probe(dev, -ENODEV, "FSA4480 not found\n"); + + dev_dbg(dev, "Found FSA4480 v%lu.%lu (Vendor ID = %lu)\n", + FIELD_GET(FSA4480_DEVICE_ID_VERSION_ID, val), + FIELD_GET(FSA4480_DEVICE_ID_REV_ID, val), + FIELD_GET(FSA4480_DEVICE_ID_VENDOR_ID, val)); + /* Safe mode */ fsa->cur_enable = FSA4480_ENABLE_DEVICE | FSA4480_ENABLE_USB; fsa->mode = TYPEC_STATE_SAFE; diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index 82650c11e451..302a89aeb258 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -745,6 +745,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag * */ if (usb_pipedevice(urb->pipe) == 0) { + struct usb_device *old; __u8 type = usb_pipetype(urb->pipe); struct usb_ctrlrequest *ctrlreq = (struct usb_ctrlrequest *) urb->setup_packet; @@ -755,14 +756,15 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag goto no_need_xmit; }
+ old = vdev->udev; switch (ctrlreq->bRequest) { case USB_REQ_SET_ADDRESS: /* set_address may come when a device is reset */ dev_info(dev, "SetAddress Request (%d) to port %d\n", ctrlreq->wValue, vdev->rhport);
- usb_put_dev(vdev->udev); vdev->udev = usb_get_dev(urb->dev); + usb_put_dev(old);
spin_lock(&vdev->ud.lock); vdev->ud.status = VDEV_ST_USED; @@ -781,8 +783,8 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag usbip_dbg_vhci_hc( "Not yet?:Get_Descriptor to device 0 (get max pipe size)\n");
- usb_put_dev(vdev->udev); vdev->udev = usb_get_dev(urb->dev); + usb_put_dev(old); goto out;
default: @@ -1067,6 +1069,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud) static void vhci_device_reset(struct usbip_device *ud) { struct vhci_device *vdev = container_of(ud, struct vhci_device, ud); + struct usb_device *old = vdev->udev; unsigned long flags;
spin_lock_irqsave(&ud->lock, flags); @@ -1074,8 +1077,8 @@ static void vhci_device_reset(struct usbip_device *ud) vdev->speed = 0; vdev->devid = 0;
- usb_put_dev(vdev->udev); vdev->udev = NULL; + usb_put_dev(old);
if (ud->tcp_socket) { sockfd_put(ud->tcp_socket); diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 63a53680a85c..6b9c12acf438 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -1483,13 +1483,7 @@ static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
notify = ops->get_vq_notification(vdpa, index);
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - if (remap_pfn_range(vma, vmf->address & PAGE_MASK, - PFN_DOWN(notify.addr), PAGE_SIZE, - vma->vm_page_prot)) - return VM_FAULT_SIGBUS; - - return VM_FAULT_NOPAGE; + return vmf_insert_pfn(vma, vmf->address & PAGE_MASK, PFN_DOWN(notify.addr)); }
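The vhost-vdpa fault handler above is the textbook case for vmf_insert_pfn(): inside a .fault callback it maps exactly the faulting page and returns a proper VM_FAULT_* code itself, whereas the replaced remap_pfn_range() call re-established the mapping and page protections on every fault and needed manual VM_FAULT_SIGBUS translation. A minimal PFN-map fault handler in that style (the pfn lookup and any mmap-time pgprot setup are assumed to exist elsewhere):

static vm_fault_t example_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long pfn = example_lookup_pfn(vma, vmf->pgoff);	/* assumed */

	/* Returns VM_FAULT_NOPAGE on success, VM_FAULT_* on error. */
	return vmf_insert_pfn(vma, vmf->address & PAGE_MASK, pfn);
}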
static const struct vm_operations_struct vhost_vdpa_vm_ops = { diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index 67dfa4778864..c9c620e32fa8 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c @@ -845,7 +845,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file, #ifdef CONFIG_XEN_PRIVCMD_EVENTFD /* Irqfd support */ static struct workqueue_struct *irqfd_cleanup_wq; -static DEFINE_MUTEX(irqfds_lock); +static DEFINE_SPINLOCK(irqfds_lock); static LIST_HEAD(irqfds_list);
struct privcmd_kernel_irqfd { @@ -909,9 +909,11 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key) irqfd_inject(kirqfd);
if (flags & EPOLLHUP) { - mutex_lock(&irqfds_lock); + unsigned long flags; + + spin_lock_irqsave(&irqfds_lock, flags); irqfd_deactivate(kirqfd); - mutex_unlock(&irqfds_lock); + spin_unlock_irqrestore(&irqfds_lock, flags); }
return 0; @@ -929,6 +931,7 @@ irqfd_poll_func(struct file *file, wait_queue_head_t *wqh, poll_table *pt) static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd) { struct privcmd_kernel_irqfd *kirqfd, *tmp; + unsigned long flags; __poll_t events; struct fd f; void *dm_op; @@ -968,18 +971,18 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd) init_waitqueue_func_entry(&kirqfd->wait, irqfd_wakeup); init_poll_funcptr(&kirqfd->pt, irqfd_poll_func);
- mutex_lock(&irqfds_lock); + spin_lock_irqsave(&irqfds_lock, flags);
list_for_each_entry(tmp, &irqfds_list, list) { if (kirqfd->eventfd == tmp->eventfd) { ret = -EBUSY; - mutex_unlock(&irqfds_lock); + spin_unlock_irqrestore(&irqfds_lock, flags); goto error_eventfd; } }
list_add_tail(&kirqfd->list, &irqfds_list); - mutex_unlock(&irqfds_lock); + spin_unlock_irqrestore(&irqfds_lock, flags);
/* * Check if there was an event already pending on the eventfd before we @@ -1011,12 +1014,13 @@ static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd) { struct privcmd_kernel_irqfd *kirqfd; struct eventfd_ctx *eventfd; + unsigned long flags;
eventfd = eventfd_ctx_fdget(irqfd->fd); if (IS_ERR(eventfd)) return PTR_ERR(eventfd);
- mutex_lock(&irqfds_lock); + spin_lock_irqsave(&irqfds_lock, flags);
list_for_each_entry(kirqfd, &irqfds_list, list) { if (kirqfd->eventfd == eventfd) { @@ -1025,7 +1029,7 @@ static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd) } }
- mutex_unlock(&irqfds_lock); + spin_unlock_irqrestore(&irqfds_lock, flags);
eventfd_ctx_put(eventfd);
@@ -1073,13 +1077,14 @@ static int privcmd_irqfd_init(void) static void privcmd_irqfd_exit(void) { struct privcmd_kernel_irqfd *kirqfd, *tmp; + unsigned long flags;
- mutex_lock(&irqfds_lock); + spin_lock_irqsave(&irqfds_lock, flags);
list_for_each_entry_safe(kirqfd, tmp, &irqfds_list, list) irqfd_deactivate(kirqfd);
- mutex_unlock(&irqfds_lock); + spin_unlock_irqrestore(&irqfds_lock, flags);
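The privcmd conversion above from a mutex to a spinlock for irqfds_lock is dictated by calling context: irqfd_wakeup() runs as a wait-queue callback, invoked with the wait-queue lock held and possibly in atomic context, where a sleeping lock is illegal. The _irqsave variants let the remaining task-context users share the same lock safely. Canonical shape:

#include <linux/spinlock.h>
#include <linux/list.h>

struct example_item { struct list_head list; };

static DEFINE_SPINLOCK(example_lock);
static LIST_HEAD(example_list);

/* Safe from both task context and atomic (wakeup-callback) context. */
static void example_add(struct example_item *item)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	list_add_tail(&item->list, &example_list);
	spin_unlock_irqrestore(&example_lock, flags);
}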
destroy_workqueue(irqfd_cleanup_wq); } diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 1a49b9232990..8a791b648ac5 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -321,7 +321,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans, WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && trans->transid != fs_info->running_transaction->transid); WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && - trans->transid != root->last_trans); + trans->transid != btrfs_get_root_last_trans(root));
level = btrfs_header_level(buf); if (level == 0) @@ -551,7 +551,7 @@ int btrfs_force_cow_block(struct btrfs_trans_handle *trans, WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && trans->transid != fs_info->running_transaction->transid); WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && - trans->transid != root->last_trans); + trans->transid != btrfs_get_root_last_trans(root));
level = btrfs_header_level(buf);
@@ -620,10 +620,16 @@ int btrfs_force_cow_block(struct btrfs_trans_handle *trans, atomic_inc(&cow->refs); rcu_assign_pointer(root->node, cow);
- btrfs_free_tree_block(trans, btrfs_root_id(root), buf, - parent_start, last_ref); + ret = btrfs_free_tree_block(trans, btrfs_root_id(root), buf, + parent_start, last_ref); free_extent_buffer(buf); add_root_to_dirty_list(root); + if (ret < 0) { + btrfs_tree_unlock(cow); + free_extent_buffer(cow); + btrfs_abort_transaction(trans, ret); + return ret; + } } else { WARN_ON(trans->transid != btrfs_header_generation(parent)); ret = btrfs_tree_mod_log_insert_key(parent, parent_slot, @@ -648,8 +654,14 @@ int btrfs_force_cow_block(struct btrfs_trans_handle *trans, return ret; } } - btrfs_free_tree_block(trans, btrfs_root_id(root), buf, - parent_start, last_ref); + ret = btrfs_free_tree_block(trans, btrfs_root_id(root), buf, + parent_start, last_ref); + if (ret < 0) { + btrfs_tree_unlock(cow); + free_extent_buffer(cow); + btrfs_abort_transaction(trans, ret); + return ret; + } } if (unlock_orig) btrfs_tree_unlock(buf); @@ -983,9 +995,13 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, free_extent_buffer(mid);
root_sub_used_bytes(root); - btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1); + ret = btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1); /* once for the root ptr */ free_extent_buffer_stale(mid); + if (ret < 0) { + btrfs_abort_transaction(trans, ret); + goto out; + } return 0; } if (btrfs_header_nritems(mid) > @@ -1053,10 +1069,14 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, goto out; } root_sub_used_bytes(root); - btrfs_free_tree_block(trans, btrfs_root_id(root), right, - 0, 1); + ret = btrfs_free_tree_block(trans, btrfs_root_id(root), + right, 0, 1); free_extent_buffer_stale(right); right = NULL; + if (ret < 0) { + btrfs_abort_transaction(trans, ret); + goto out; + } } else { struct btrfs_disk_key right_key; btrfs_node_key(right, &right_key, 0); @@ -1111,9 +1131,13 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, goto out; } root_sub_used_bytes(root); - btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1); + ret = btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1); free_extent_buffer_stale(mid); mid = NULL; + if (ret < 0) { + btrfs_abort_transaction(trans, ret); + goto out; + } } else { /* update the parent key to reflect our changes */ struct btrfs_disk_key mid_key; @@ -2883,7 +2907,11 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans, old = root->node; ret = btrfs_tree_mod_log_insert_root(root->node, c, false); if (ret < 0) { - btrfs_free_tree_block(trans, btrfs_root_id(root), c, 0, 1); + int ret2; + + ret2 = btrfs_free_tree_block(trans, btrfs_root_id(root), c, 0, 1); + if (ret2 < 0) + btrfs_abort_transaction(trans, ret2); btrfs_tree_unlock(c); free_extent_buffer(c); return ret; @@ -4452,9 +4480,12 @@ static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans, root_sub_used_bytes(root);
atomic_inc(&leaf->refs); - btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1); + ret = btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1); free_extent_buffer_stale(leaf); - return 0; + if (ret < 0) + btrfs_abort_transaction(trans, ret); + + return ret; } /* * delete the item at the leaf level in path. If that empties diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index c03c58246033..a56209d275c1 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -354,6 +354,16 @@ static inline void btrfs_set_root_last_log_commit(struct btrfs_root *root, int c WRITE_ONCE(root->last_log_commit, commit_id); }
+static inline u64 btrfs_get_root_last_trans(const struct btrfs_root *root) +{ + return READ_ONCE(root->last_trans); +} + +static inline void btrfs_set_root_last_trans(struct btrfs_root *root, u64 transid) +{ + WRITE_ONCE(root->last_trans, transid); +} + /* * Structure that conveys information about an extent that is going to replace * all the extents in a file range. @@ -447,6 +457,7 @@ struct btrfs_file_private { void *filldir_buf; u64 last_index; struct extent_state *llseek_cached_state; + bool fsync_skip_inode_lock; };
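The new btrfs last_trans accessors above wrap the field in READ_ONCE()/WRITE_ONCE() because it is written and read from contexts that do not share a lock; the annotations keep the compiler from tearing, fusing, or re-reading the accesses and, just as importantly, document the lockless use at every call site. The pattern in isolation:

struct example_root {
	u64 last_trans;	/* accessed locklessly, via helpers only */
};

static inline u64 example_get_last_trans(const struct example_root *root)
{
	return READ_ONCE(root->last_trans);
}

static inline void example_set_last_trans(struct example_root *root, u64 id)
{
	WRITE_ONCE(root->last_trans, id);
}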
static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info) diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c index 407ccec3e57e..f664678c71d1 100644 --- a/fs/btrfs/defrag.c +++ b/fs/btrfs/defrag.c @@ -139,7 +139,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, if (trans) transid = trans->transid; else - transid = inode->root->last_trans; + transid = btrfs_get_root_last_trans(root);
defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS); if (!defrag) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index cabb558dbdaa..3791813dc7b6 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -658,7 +658,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info, root->state = 0; RB_CLEAR_NODE(&root->rb_node);
- root->last_trans = 0; + btrfs_set_root_last_trans(root, 0); root->free_objectid = 0; root->nr_delalloc_inodes = 0; root->nr_ordered_extents = 0; @@ -1010,7 +1010,7 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans, return ret; }
- log_root->last_trans = trans->transid; + btrfs_set_root_last_trans(log_root, trans->transid); log_root->root_key.offset = btrfs_root_id(root);
inode_item = &log_root->root_item.inode; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index b75e14f399a0..844b677d054e 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -104,10 +104,7 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head *head; struct btrfs_delayed_ref_root *delayed_refs; struct btrfs_path *path; - struct btrfs_extent_item *ei; - struct extent_buffer *leaf; struct btrfs_key key; - u32 item_size; u64 num_refs; u64 extent_flags; u64 owner = 0; @@ -157,16 +154,11 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, }
if (ret == 0) { - leaf = path->nodes[0]; - item_size = btrfs_item_size(leaf, path->slots[0]); - if (item_size >= sizeof(*ei)) { - ei = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_extent_item); - num_refs = btrfs_extent_refs(leaf, ei); - extent_flags = btrfs_extent_flags(leaf, ei); - owner = btrfs_get_extent_owner_root(fs_info, leaf, - path->slots[0]); - } else { + struct extent_buffer *leaf = path->nodes[0]; + struct btrfs_extent_item *ei; + const u32 item_size = btrfs_item_size(leaf, path->slots[0]); + + if (unlikely(item_size < sizeof(*ei))) { ret = -EUCLEAN; btrfs_err(fs_info, "unexpected extent item size, has %u expect >= %zu", @@ -179,6 +171,10 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, goto out_free; }
+ ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); + num_refs = btrfs_extent_refs(leaf, ei); + extent_flags = btrfs_extent_flags(leaf, ei); + owner = btrfs_get_extent_owner_root(fs_info, leaf, path->slots[0]); BUG_ON(num_refs == 0); } else { num_refs = 0; @@ -3420,10 +3416,10 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans, return 0; }
-void btrfs_free_tree_block(struct btrfs_trans_handle *trans, - u64 root_id, - struct extent_buffer *buf, - u64 parent, int last_ref) +int btrfs_free_tree_block(struct btrfs_trans_handle *trans, + u64 root_id, + struct extent_buffer *buf, + u64 parent, int last_ref) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_block_group *bg; @@ -3450,11 +3446,12 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans, btrfs_init_tree_ref(&generic_ref, btrfs_header_level(buf), 0, false); btrfs_ref_tree_mod(fs_info, &generic_ref); ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL); - BUG_ON(ret); /* -ENOMEM */ + if (ret < 0) + return ret; }
if (!last_ref) - return; + return 0;
if (btrfs_header_generation(buf) != trans->transid) goto out; @@ -3511,6 +3508,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans, * matter anymore. */ clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags); + return 0; }
/* Can return -ENOMEM */ @@ -5644,7 +5642,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans, struct walk_control *wc) { struct btrfs_fs_info *fs_info = root->fs_info; - int ret; + int ret = 0; int level = wc->level; struct extent_buffer *eb = path->nodes[level]; u64 parent = 0; @@ -5731,12 +5729,14 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans, goto owner_mismatch; }
- btrfs_free_tree_block(trans, btrfs_root_id(root), eb, parent, - wc->refs[level] == 1); + ret = btrfs_free_tree_block(trans, btrfs_root_id(root), eb, parent, + wc->refs[level] == 1); + if (ret < 0) + btrfs_abort_transaction(trans, ret); out: wc->refs[level] = 0; wc->flags[level] = 0; - return 0; + return ret;
owner_mismatch: btrfs_err_rl(fs_info, "unexpected tree owner, have %llu expect %llu", diff --git a/fs/btrfs/extent-tree.h b/fs/btrfs/extent-tree.h index af9f8800d5ac..2ad51130c037 100644 --- a/fs/btrfs/extent-tree.h +++ b/fs/btrfs/extent-tree.h @@ -127,10 +127,10 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, u64 empty_size, u64 reloc_src_root, enum btrfs_lock_nesting nest); -void btrfs_free_tree_block(struct btrfs_trans_handle *trans, - u64 root_id, - struct extent_buffer *buf, - u64 parent, int last_ref); +int btrfs_free_tree_block(struct btrfs_trans_handle *trans, + u64 root_id, + struct extent_buffer *buf, + u64 parent, int last_ref); int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 owner, u64 offset, u64 ram_bytes, diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 958155cc43a8..0486b1f91124 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2246,10 +2246,8 @@ void extent_write_locked_range(struct inode *inode, struct page *locked_page,
page = find_get_page(mapping, cur >> PAGE_SHIFT); ASSERT(PageLocked(page)); - if (pages_dirty && page != locked_page) { + if (pages_dirty && page != locked_page) ASSERT(PageDirty(page)); - clear_page_dirty_for_io(page); - }
ret = __extent_writepage_io(BTRFS_I(inode), page, &bio_ctrl, i_size, &nr); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index d90138683a0a..ca434f0cd27f 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1550,21 +1550,37 @@ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from) * So here we disable page faults in the iov_iter and then retry if we * got -EFAULT, faulting in the pages before the retry. */ +again: from->nofault = true; dio = btrfs_dio_write(iocb, from, written); from->nofault = false;
- /* - * iomap_dio_complete() will call btrfs_sync_file() if we have a dsync - * iocb, and that needs to lock the inode. So unlock it before calling - * iomap_dio_complete() to avoid a deadlock. - */ - btrfs_inode_unlock(BTRFS_I(inode), ilock_flags); - - if (IS_ERR_OR_NULL(dio)) + if (IS_ERR_OR_NULL(dio)) { ret = PTR_ERR_OR_ZERO(dio); - else + } else { + struct btrfs_file_private stack_private = { 0 }; + struct btrfs_file_private *private; + const bool have_private = (file->private_data != NULL); + + if (!have_private) + file->private_data = &stack_private; + + /* + * If we have a synchronous write, we must make sure the fsync + * triggered by the iomap_dio_complete() call below doesn't + * deadlock on the inode lock - we are already holding it and we + * can't call it after unlocking because we may need to complete + * partial writes due to the input buffer (or parts of it) not + * being already faulted in. + */ + private = file->private_data; + private->fsync_skip_inode_lock = true; ret = iomap_dio_complete(dio); + private->fsync_skip_inode_lock = false; + + if (!have_private) + file->private_data = NULL; + }
/* No increment (+=) because iomap returns a cumulative value. */ if (ret > 0) @@ -1591,10 +1607,12 @@ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from) } else { fault_in_iov_iter_readable(from, left); prev_left = left; - goto relock; + goto again; } }
+ btrfs_inode_unlock(BTRFS_I(inode), ilock_flags); + /* * If 'ret' is -ENOTBLK or we have not written all data, then it means * we must fallback to buffered IO. @@ -1793,6 +1811,7 @@ static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx) */ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) { + struct btrfs_file_private *private = file->private_data; struct dentry *dentry = file_dentry(file); struct inode *inode = d_inode(dentry); struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); @@ -1802,6 +1821,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) int ret = 0, err; u64 len; bool full_sync; + const bool skip_ilock = (private ? private->fsync_skip_inode_lock : false);
trace_btrfs_sync_file(file, datasync);
@@ -1829,7 +1849,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) if (ret) goto out;
- btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP); + if (skip_ilock) + down_write(&BTRFS_I(inode)->i_mmap_lock); + else + btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
atomic_inc(&root->log_batch);
@@ -1853,7 +1876,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) */ ret = start_ordered_ops(inode, start, end); if (ret) { - btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP); + if (skip_ilock) + up_write(&BTRFS_I(inode)->i_mmap_lock); + else + btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP); goto out; }
@@ -1982,7 +2008,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) * file again, but that will end up using the synchronization * inside btrfs_sync_log to keep things safe. */ - btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP); + if (skip_ilock) + up_write(&BTRFS_I(inode)->i_mmap_lock); + else + btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
if (ret == BTRFS_NO_LOG_SYNC) { ret = btrfs_end_transaction(trans); @@ -2051,7 +2080,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
out_release_extents: btrfs_release_log_ctx_extents(&ctx); - btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP); + if (skip_ilock) + up_write(&BTRFS_I(inode)->i_mmap_lock); + else + btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP); goto out; }
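
A note on the mechanism in the hunks above: the direct-write path already holds the inode lock when iomap_dio_complete() may call back into btrfs_sync_file(), so instead of unlocking (which would break completion of partial writes), a flag is passed down through file->private_data, borrowing a stack-allocated private when the file has none, and fsync then takes only the mmap lock. A minimal userspace sketch of this caller-owned-context pattern, all names hypothetical and a pthread mutex standing in for the inode lock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical per-open-file context, analogous to btrfs_file_private. */
struct file_ctx {
    bool skip_lock; /* set while the caller already holds inode_lock */
};

static pthread_mutex_t inode_lock = PTHREAD_MUTEX_INITIALIZER;

/* Analogue of btrfs_sync_file(): honours the caller's skip_lock hint. */
static void sync_file(struct file_ctx *ctx)
{
    bool skip = ctx && ctx->skip_lock;

    if (!skip)
        pthread_mutex_lock(&inode_lock);
    puts("syncing");
    if (!skip)
        pthread_mutex_unlock(&inode_lock);
}

/* Analogue of the direct-write path: the lock stays held across completion. */
static void direct_write(struct file_ctx **private_data)
{
    struct file_ctx stack_ctx = { 0 };
    bool have_private = (*private_data != NULL);

    if (!have_private)
        *private_data = &stack_ctx;

    pthread_mutex_lock(&inode_lock);
    (*private_data)->skip_lock = true;
    sync_file(*private_data);  /* would self-deadlock without the flag */
    (*private_data)->skip_lock = false;
    pthread_mutex_unlock(&inode_lock);

    if (!have_private)
        *private_data = NULL;  /* never leak the stack pointer */
}

int main(void)
{
    struct file_ctx *private_data = NULL;

    direct_write(&private_data);
    return 0;
}
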
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index d674f2106593..62c3dea9572a 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -858,6 +858,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, spin_unlock(&ctl->tree_lock); btrfs_err(fs_info, "Duplicate entries in free space cache, dumping"); + kmem_cache_free(btrfs_free_space_bitmap_cachep, e->bitmap); kmem_cache_free(btrfs_free_space_cachep, e); goto free_cache; } diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c index 90f2938bd743..7ba50e133921 100644 --- a/fs/btrfs/free-space-tree.c +++ b/fs/btrfs/free-space-tree.c @@ -1300,10 +1300,14 @@ int btrfs_delete_free_space_tree(struct btrfs_fs_info *fs_info) btrfs_tree_lock(free_space_root->node); btrfs_clear_buffer_dirty(trans, free_space_root->node); btrfs_tree_unlock(free_space_root->node); - btrfs_free_tree_block(trans, btrfs_root_id(free_space_root), - free_space_root->node, 0, 1); - + ret = btrfs_free_tree_block(trans, btrfs_root_id(free_space_root), + free_space_root->node, 0, 1); btrfs_put_root(free_space_root); + if (ret < 0) { + btrfs_abort_transaction(trans, ret); + btrfs_end_transaction(trans); + return ret; + }
return btrfs_commit_transaction(trans); } diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index efd5d6e9589e..c1b0556e4036 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -719,6 +719,8 @@ static noinline int create_subvol(struct mnt_idmap *idmap, ret = btrfs_insert_root(trans, fs_info->tree_root, &key, root_item); if (ret) { + int ret2; + /* * Since we don't abort the transaction in this case, free the * tree block so that we don't leak space and leave the @@ -729,7 +731,9 @@ static noinline int create_subvol(struct mnt_idmap *idmap, btrfs_tree_lock(leaf); btrfs_clear_buffer_dirty(trans, leaf); btrfs_tree_unlock(leaf); - btrfs_free_tree_block(trans, objectid, leaf, 0, 1); + ret2 = btrfs_free_tree_block(trans, objectid, leaf, 0, 1); + if (ret2 < 0) + btrfs_abort_transaction(trans, ret2); free_extent_buffer(leaf); goto out; } diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c index 7e46aa8a0444..b876156fcd03 100644 --- a/fs/btrfs/print-tree.c +++ b/fs/btrfs/print-tree.c @@ -14,7 +14,7 @@
struct root_name_map { u64 id; - char name[16]; + const char *name; };
static const struct root_name_map root_map[] = { diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 39a15cca58ca..29d6ca3b874e 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1446,9 +1446,11 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info) btrfs_tree_lock(quota_root->node); btrfs_clear_buffer_dirty(trans, quota_root->node); btrfs_tree_unlock(quota_root->node); - btrfs_free_tree_block(trans, btrfs_root_id(quota_root), - quota_root->node, 0, 1); + ret = btrfs_free_tree_block(trans, btrfs_root_id(quota_root), + quota_root->node, 0, 1);
+ if (ret < 0) + btrfs_abort_transaction(trans, ret);
out: btrfs_put_root(quota_root); diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 8b24bb5a0aa1..f2935252b981 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -817,7 +817,7 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans, goto abort; } set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state); - reloc_root->last_trans = trans->transid; + btrfs_set_root_last_trans(reloc_root, trans->transid); return reloc_root; fail: kfree(root_item); @@ -864,7 +864,7 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans, */ if (root->reloc_root) { reloc_root = root->reloc_root; - reloc_root->last_trans = trans->transid; + btrfs_set_root_last_trans(reloc_root, trans->transid); return 0; }
@@ -1739,7 +1739,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, * btrfs_update_reloc_root() and update our root item * appropriately. */ - reloc_root->last_trans = trans->transid; + btrfs_set_root_last_trans(reloc_root, trans->transid); trans->block_rsv = rc->block_rsv;
replaced = 0; @@ -2082,7 +2082,7 @@ static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans, struct btrfs_root *root; int ret;
- if (reloc_root->last_trans == trans->transid) + if (btrfs_get_root_last_trans(reloc_root) == trans->transid) return 0;
root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 3388c836b9a5..76117bb2c726 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -405,7 +405,7 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans, int ret = 0;
if ((test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && - root->last_trans < trans->transid) || force) { + btrfs_get_root_last_trans(root) < trans->transid) || force) { WARN_ON(!force && root->commit_root != root->node);
/* @@ -421,7 +421,7 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans, smp_wmb();
spin_lock(&fs_info->fs_roots_radix_lock); - if (root->last_trans == trans->transid && !force) { + if (btrfs_get_root_last_trans(root) == trans->transid && !force) { spin_unlock(&fs_info->fs_roots_radix_lock); return 0; } @@ -429,7 +429,7 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans, (unsigned long)btrfs_root_id(root), BTRFS_ROOT_TRANS_TAG); spin_unlock(&fs_info->fs_roots_radix_lock); - root->last_trans = trans->transid; + btrfs_set_root_last_trans(root, trans->transid);
/* this is pretty tricky. We don't want to * take the relocation lock in btrfs_record_root_in_trans @@ -491,7 +491,7 @@ int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, * and barriers */ smp_rmb(); - if (root->last_trans == trans->transid && + if (btrfs_get_root_last_trans(root) == trans->transid && !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state)) return 0;
diff --git a/fs/buffer.c b/fs/buffer.c index 8c19e705b9c3..645f0387dfe1 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2187,6 +2187,8 @@ static void __block_commit_write(struct folio *folio, size_t from, size_t to) struct buffer_head *bh, *head;
bh = head = folio_buffers(folio); + if (!bh) + return; blocksize = bh->b_size;
block_start = 0; diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index d5bd1e3a5d36..e7a09a99837b 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -1410,7 +1410,11 @@ int ext4_inlinedir_to_tree(struct file *dir_file, hinfo->hash = EXT4_DIRENT_HASH(de); hinfo->minor_hash = EXT4_DIRENT_MINOR_HASH(de); } else { - ext4fs_dirhash(dir, de->name, de->name_len, hinfo); + err = ext4fs_dirhash(dir, de->name, de->name_len, hinfo); + if (err) { + ret = err; + goto out; + } } if ((hinfo->hash < start_hash) || ((hinfo->hash == start_hash) && diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 4b0d64a76e88..238e19633823 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -2973,6 +2973,11 @@ static int ext4_da_do_write_end(struct address_space *mapping, bool disksize_changed = false; loff_t new_i_size;
+ if (unlikely(!folio_buffers(folio))) { + folio_unlock(folio); + folio_put(folio); + return -EIO; + } /* * block_write_end() will mark the inode as dirty with I_DIRTY_PAGES * flag, which all that's needed to trigger page writeback. diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index ae5b544ed0cc..c8d9d85e0e87 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -399,6 +399,7 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction, tmp = jbd2_alloc(bh_in->b_size, GFP_NOFS); if (!tmp) { brelse(new_bh); + free_buffer_head(new_bh); return -ENOMEM; } spin_lock(&jh_in->b_state_lock); diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index c848ebe5d08f..0f9b4f7b56cd 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -2053,8 +2053,7 @@ int nfsd_nl_listener_set_doit(struct sk_buff *skb, struct genl_info *info) continue; }
- ret = svc_xprt_create_from_sa(serv, xcl_name, net, sa, - SVC_SOCK_ANONYMOUS, + ret = svc_xprt_create_from_sa(serv, xcl_name, net, sa, 0, get_current_cred()); /* always save the latest error */ if (ret < 0) diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c index c71ae5c04306..4a20e92474b2 100644 --- a/fs/smb/client/cifs_debug.c +++ b/fs/smb/client/cifs_debug.c @@ -1072,7 +1072,7 @@ static int cifs_security_flags_proc_open(struct inode *inode, struct file *file) static void cifs_security_flags_handle_must_flags(unsigned int *flags) { - unsigned int signflags = *flags & CIFSSEC_MUST_SIGN; + unsigned int signflags = *flags & (CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL);
if ((*flags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5) *flags = CIFSSEC_MUST_KRB5; diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h index a865941724c0..d4bcc7da700c 100644 --- a/fs/smb/client/cifsglob.h +++ b/fs/smb/client/cifsglob.h @@ -1901,7 +1901,7 @@ static inline bool is_replayable_error(int error) #define CIFSSEC_MAY_SIGN 0x00001 #define CIFSSEC_MAY_NTLMV2 0x00004 #define CIFSSEC_MAY_KRB5 0x00008 -#define CIFSSEC_MAY_SEAL 0x00040 /* not supported yet */ +#define CIFSSEC_MAY_SEAL 0x00040 #define CIFSSEC_MAY_NTLMSSP 0x00080 /* raw ntlmssp with ntlmv2 */
#define CIFSSEC_MUST_SIGN 0x01001 @@ -1911,11 +1911,11 @@ require use of the stronger protocol */ #define CIFSSEC_MUST_NTLMV2 0x04004 #define CIFSSEC_MUST_KRB5 0x08008 #ifdef CONFIG_CIFS_UPCALL -#define CIFSSEC_MASK 0x8F08F /* flags supported if no weak allowed */ +#define CIFSSEC_MASK 0xCF0CF /* flags supported if no weak allowed */ #else -#define CIFSSEC_MASK 0x87087 /* flags supported if no weak allowed */ +#define CIFSSEC_MASK 0xC70C7 /* flags supported if no weak allowed */ #endif /* UPCALL */ -#define CIFSSEC_MUST_SEAL 0x40040 /* not supported yet */ +#define CIFSSEC_MUST_SEAL 0x40040 #define CIFSSEC_MUST_NTLMSSP 0x80080 /* raw ntlmssp with ntlmv2 */
#define CIFSSEC_DEF (CIFSSEC_MAY_SIGN | CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_NTLMSSP | CIFSSEC_MAY_SEAL) diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c index 4a8aa1de9522..dd0afa23734c 100644 --- a/fs/smb/client/inode.c +++ b/fs/smb/client/inode.c @@ -1042,13 +1042,26 @@ static int reparse_info_to_fattr(struct cifs_open_info_data *data, }
rc = -EOPNOTSUPP; - switch ((data->reparse.tag = tag)) { - case 0: /* SMB1 symlink */ + data->reparse.tag = tag; + if (!data->reparse.tag) { if (server->ops->query_symlink) { rc = server->ops->query_symlink(xid, tcon, cifs_sb, full_path, &data->symlink_target); } + if (rc == -EOPNOTSUPP) + data->reparse.tag = IO_REPARSE_TAG_INTERNAL; + } + + switch (data->reparse.tag) { + case 0: /* SMB1 symlink */ + break; + case IO_REPARSE_TAG_INTERNAL: + rc = 0; + if (le32_to_cpu(data->fi.Attributes) & ATTR_DIRECTORY) { + cifs_create_junction_fattr(fattr, sb); + goto out; + } break; case IO_REPARSE_TAG_MOUNT_POINT: cifs_create_junction_fattr(fattr, sb); diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c index 07c468ddb88a..65d4b72b4d51 100644 --- a/fs/smb/client/misc.c +++ b/fs/smb/client/misc.c @@ -1288,6 +1288,7 @@ int cifs_inval_name_dfs_link_error(const unsigned int xid, const char *full_path, bool *islink) { + struct TCP_Server_Info *server = tcon->ses->server; struct cifs_ses *ses = tcon->ses; size_t len; char *path; @@ -1304,12 +1305,12 @@ int cifs_inval_name_dfs_link_error(const unsigned int xid, !is_tcon_dfs(tcon)) return 0;
- spin_lock(&tcon->tc_lock); - if (!tcon->origin_fullpath) { - spin_unlock(&tcon->tc_lock); + spin_lock(&server->srv_lock); + if (!server->leaf_fullpath) { + spin_unlock(&server->srv_lock); return 0; } - spin_unlock(&tcon->tc_lock); + spin_unlock(&server->srv_lock);
/* * Slow path - tcon is DFS and @full_path has prefix path, so attempt diff --git a/fs/smb/client/reparse.c b/fs/smb/client/reparse.c index a0ffbda90733..689d8a506d45 100644 --- a/fs/smb/client/reparse.c +++ b/fs/smb/client/reparse.c @@ -505,6 +505,10 @@ bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb, }
switch (tag) { + case IO_REPARSE_TAG_INTERNAL: + if (!(fattr->cf_cifsattrs & ATTR_DIRECTORY)) + return false; + fallthrough; case IO_REPARSE_TAG_DFS: case IO_REPARSE_TAG_DFSR: case IO_REPARSE_TAG_MOUNT_POINT: diff --git a/fs/smb/client/reparse.h b/fs/smb/client/reparse.h index 6b55d1df9e2f..2c0644bc4e65 100644 --- a/fs/smb/client/reparse.h +++ b/fs/smb/client/reparse.h @@ -12,6 +12,12 @@ #include "fs_context.h" #include "cifsglob.h"
+/* + * Used only by cifs.ko to ignore reparse points from files when client or + * server doesn't support FSCTL_GET_REPARSE_POINT. + */ +#define IO_REPARSE_TAG_INTERNAL ((__u32)~0U) + static inline dev_t reparse_nfs_mkdev(struct reparse_posix_data *buf) { u64 v = le64_to_cpu(*(__le64 *)buf->DataBuffer); @@ -78,10 +84,19 @@ static inline u32 reparse_mode_wsl_tag(mode_t mode) static inline bool reparse_inode_match(struct inode *inode, struct cifs_fattr *fattr) { + struct cifsInodeInfo *cinode = CIFS_I(inode); struct timespec64 ctime = inode_get_ctime(inode);
- return (CIFS_I(inode)->cifsAttrs & ATTR_REPARSE) && - CIFS_I(inode)->reparse_tag == fattr->cf_cifstag && + /* + * Do not match reparse tags when client or server doesn't support + * FSCTL_GET_REPARSE_POINT. @fattr->cf_cifstag should contain correct + * reparse tag from query dir response but the client won't be able to + * read the reparse point data anyway. This spares us a revalidation. + */ + if (cinode->reparse_tag != IO_REPARSE_TAG_INTERNAL && + cinode->reparse_tag != fattr->cf_cifstag) + return false; + return (cinode->cifsAttrs & ATTR_REPARSE) && timespec64_equal(&ctime, &fattr->cf_ctime); }
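
The new sentinel sits outside the Microsoft-defined reparse tag space, so it can never collide with a tag returned by a server; it simply records "reparse point present, data unreadable". A condensed, self-contained sketch of the matching rule above, with simplified types and the ctime comparison omitted for brevity:

#include <stdbool.h>
#include <stdint.h>

#define ATTR_REPARSE            0x0400
/* Client-only sentinel: reparse point present but its data unreadable. */
#define IO_REPARSE_TAG_INTERNAL ((uint32_t)~0U)

struct cached_inode {
    uint32_t attrs;
    uint32_t reparse_tag;
};

/*
 * Match a cached inode against fresh attributes from a directory listing.
 * When the cached tag is the internal sentinel, skip the tag comparison:
 * the client could not read the real tag anyway, so insisting on a match
 * would only force a pointless revalidation.
 */
static bool reparse_match(const struct cached_inode *ci, uint32_t new_tag)
{
    if (ci->reparse_tag != IO_REPARSE_TAG_INTERNAL &&
        ci->reparse_tag != new_tag)
        return false;
    return ci->attrs & ATTR_REPARSE;
}

int main(void)
{
    struct cached_inode ci = { ATTR_REPARSE, IO_REPARSE_TAG_INTERNAL };

    return reparse_match(&ci, 0xA000000CU) ? 0 : 1; /* sentinel matches */
}
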
diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c index 5c02a12251c8..062b86a4936f 100644 --- a/fs/smb/client/smb2inode.c +++ b/fs/smb/client/smb2inode.c @@ -930,6 +930,8 @@ int smb2_query_path_info(const unsigned int xid,
switch (rc) { case 0: + rc = parse_create_response(data, cifs_sb, &out_iov[0]); + break; case -EOPNOTSUPP: /* * BB TODO: When support for special files added to Samba diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c index bb84a89e5905..896147ba6660 100644 --- a/fs/smb/client/smb2pdu.c +++ b/fs/smb/client/smb2pdu.c @@ -82,6 +82,9 @@ int smb3_encryption_required(const struct cifs_tcon *tcon) if (tcon->seal && (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)) return 1; + if (((global_secflags & CIFSSEC_MUST_SEAL) == CIFSSEC_MUST_SEAL) && + (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)) + return 1; return 0; }
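
One subtlety behind the mask changes above: each CIFSSEC_MUST_* value embeds its CIFSSEC_MAY_* bit (0x40040 = 0x40000 | 0x00040), which is why smb3_encryption_required() masks and compares against the full constant instead of testing a single bit. A small standalone illustration:

#include <stdio.h>

#define CIFSSEC_MAY_SEAL  0x00040
#define CIFSSEC_MUST_SEAL 0x40040 /* the MUST bit plus its MAY bit */

int main(void)
{
    unsigned int may_only = CIFSSEC_MAY_SEAL;
    unsigned int must     = CIFSSEC_MUST_SEAL;

    /* A bare bit test would wrongly promote MAY to MUST ... */
    printf("bit test on may_only: %d\n", !!(may_only & CIFSSEC_MUST_SEAL));
    /* ... so the code masks and compares the whole constant instead. */
    printf("mask-compare on may_only: %d\n",
           (may_only & CIFSSEC_MUST_SEAL) == CIFSSEC_MUST_SEAL);
    printf("mask-compare on must: %d\n",
           (must & CIFSSEC_MUST_SEAL) == CIFSSEC_MUST_SEAL);
    return 0;
}
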
diff --git a/fs/tracefs/event_inode.c b/fs/tracefs/event_inode.c index 5d88c184f0fc..01e99e98457d 100644 --- a/fs/tracefs/event_inode.c +++ b/fs/tracefs/event_inode.c @@ -112,7 +112,7 @@ static void release_ei(struct kref *ref) entry->release(entry->name, ei->data); }
- call_rcu(&ei->rcu, free_ei_rcu); + call_srcu(&eventfs_srcu, &ei->rcu, free_ei_rcu); }
static inline void put_ei(struct eventfs_inode *ei) @@ -736,7 +736,7 @@ struct eventfs_inode *eventfs_create_dir(const char *name, struct eventfs_inode /* Was the parent freed? */ if (list_empty(&ei->list)) { cleanup_ei(ei); - ei = NULL; + ei = ERR_PTR(-EBUSY); } return ei; } diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c index 7c29f4afc23d..507a61004ed3 100644 --- a/fs/tracefs/inode.c +++ b/fs/tracefs/inode.c @@ -42,7 +42,7 @@ static struct inode *tracefs_alloc_inode(struct super_block *sb) struct tracefs_inode *ti; unsigned long flags;
- ti = kmem_cache_alloc(tracefs_inode_cachep, GFP_KERNEL); + ti = alloc_inode_sb(sb, tracefs_inode_cachep, GFP_KERNEL); if (!ti) return NULL;
@@ -53,15 +53,14 @@ static struct inode *tracefs_alloc_inode(struct super_block *sb) return &ti->vfs_inode; }
-static void tracefs_free_inode_rcu(struct rcu_head *rcu) +static void tracefs_free_inode(struct inode *inode) { - struct tracefs_inode *ti; + struct tracefs_inode *ti = get_tracefs(inode);
- ti = container_of(rcu, struct tracefs_inode, rcu); kmem_cache_free(tracefs_inode_cachep, ti); }
-static void tracefs_free_inode(struct inode *inode) +static void tracefs_destroy_inode(struct inode *inode) { struct tracefs_inode *ti = get_tracefs(inode); unsigned long flags; @@ -69,8 +68,6 @@ static void tracefs_free_inode(struct inode *inode) spin_lock_irqsave(&tracefs_inode_lock, flags); list_del_rcu(&ti->list); spin_unlock_irqrestore(&tracefs_inode_lock, flags); - - call_rcu(&ti->rcu, tracefs_free_inode_rcu); }
static ssize_t default_read_file(struct file *file, char __user *buf, @@ -445,6 +442,7 @@ static int tracefs_drop_inode(struct inode *inode) static const struct super_operations tracefs_super_operations = { .alloc_inode = tracefs_alloc_inode, .free_inode = tracefs_free_inode, + .destroy_inode = tracefs_destroy_inode, .drop_inode = tracefs_drop_inode, .statfs = simple_statfs, .show_options = tracefs_show_options, diff --git a/fs/tracefs/internal.h b/fs/tracefs/internal.h index f704d8348357..d83c2a25f288 100644 --- a/fs/tracefs/internal.h +++ b/fs/tracefs/internal.h @@ -10,10 +10,7 @@ enum { };
struct tracefs_inode { - union { - struct inode vfs_inode; - struct rcu_head rcu; - }; + struct inode vfs_inode; /* The below gets initialized with memset_after(ti, 0, vfs_inode) */ struct list_head list; unsigned long flags; diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c index 558ad046972a..bb471ec36404 100644 --- a/fs/udf/balloc.c +++ b/fs/udf/balloc.c @@ -18,6 +18,7 @@ #include "udfdecl.h"
#include <linux/bitops.h> +#include <linux/overflow.h>
#include "udf_i.h" #include "udf_sb.h" @@ -140,7 +141,6 @@ static void udf_bitmap_free_blocks(struct super_block *sb, { struct udf_sb_info *sbi = UDF_SB(sb); struct buffer_head *bh = NULL; - struct udf_part_map *partmap; unsigned long block; unsigned long block_group; unsigned long bit; @@ -149,19 +149,9 @@ static void udf_bitmap_free_blocks(struct super_block *sb, unsigned long overflow;
mutex_lock(&sbi->s_alloc_mutex); - partmap = &sbi->s_partmaps[bloc->partitionReferenceNum]; - if (bloc->logicalBlockNum + count < count || - (bloc->logicalBlockNum + count) > partmap->s_partition_len) { - udf_debug("%u < %d || %u + %u > %u\n", - bloc->logicalBlockNum, 0, - bloc->logicalBlockNum, count, - partmap->s_partition_len); - goto error_return; - } - + /* We make sure this cannot overflow when mounting the filesystem */ block = bloc->logicalBlockNum + offset + (sizeof(struct spaceBitmapDesc) << 3); - do { overflow = 0; block_group = block >> (sb->s_blocksize_bits + 3); @@ -391,7 +381,6 @@ static void udf_table_free_blocks(struct super_block *sb, uint32_t count) { struct udf_sb_info *sbi = UDF_SB(sb); - struct udf_part_map *partmap; uint32_t start, end; uint32_t elen; struct kernel_lb_addr eloc; @@ -400,16 +389,6 @@ static void udf_table_free_blocks(struct super_block *sb, struct udf_inode_info *iinfo;
mutex_lock(&sbi->s_alloc_mutex); - partmap = &sbi->s_partmaps[bloc->partitionReferenceNum]; - if (bloc->logicalBlockNum + count < count || - (bloc->logicalBlockNum + count) > partmap->s_partition_len) { - udf_debug("%u < %d || %u + %u > %u\n", - bloc->logicalBlockNum, 0, - bloc->logicalBlockNum, count, - partmap->s_partition_len); - goto error_return; - } - iinfo = UDF_I(table); udf_add_free_space(sb, sbi->s_partition, count);
@@ -684,6 +663,17 @@ void udf_free_blocks(struct super_block *sb, struct inode *inode, { uint16_t partition = bloc->partitionReferenceNum; struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition]; + uint32_t blk; + + if (check_add_overflow(bloc->logicalBlockNum, offset, &blk) || + check_add_overflow(blk, count, &blk) || + bloc->logicalBlockNum + count > map->s_partition_len) { + udf_debug("Invalid request to free blocks: (%d, %u), off %u, " + "len %u, partition len %u\n", + partition, bloc->logicalBlockNum, offset, count, + map->s_partition_len); + return; + }
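
check_add_overflow() here is the kernel wrapper around the compiler's __builtin_add_overflow(). A standalone version of the same validation, assuming 32-bit block numbers; the final comparison cannot wrap because the two overflow checks have already passed:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Validate a free-blocks request: block + offset + count must neither
 * wrap around UINT32_MAX nor run past the partition length.
 */
static bool free_request_valid(uint32_t block, uint32_t offset,
                               uint32_t count, uint32_t partition_len)
{
    uint32_t blk;

    if (__builtin_add_overflow(block, offset, &blk) ||
        __builtin_add_overflow(blk, count, &blk) ||
        block + count > partition_len)
        return false;
    return true;
}

int main(void)
{
    printf("%d\n", free_request_valid(10, 0, 5, 100));         /* 1 */
    printf("%d\n", free_request_valid(UINT32_MAX, 1, 1, 100)); /* 0 */
    return 0;
}
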
if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) { udf_bitmap_free_blocks(sb, map->s_uspace.s_bitmap, diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h index 7428cb43952d..9fc4fed9a19f 100644 --- a/include/linux/blk-integrity.h +++ b/include/linux/blk-integrity.h @@ -100,14 +100,13 @@ static inline bool blk_integrity_rq(struct request *rq) }
/* - * Return the first bvec that contains integrity data. Only drivers that are - * limited to a single integrity segment should use this helper. + * Return the current bvec that contains the integrity data. bip_iter may be + * advanced to iterate over the integrity data. */ -static inline struct bio_vec *rq_integrity_vec(struct request *rq) +static inline struct bio_vec rq_integrity_vec(struct request *rq) { - if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1)) - return NULL; - return rq->bio->bi_integrity->bip_vec; + return mp_bvec_iter_bvec(rq->bio->bi_integrity->bip_vec, + rq->bio->bi_integrity->bip_iter); } #else /* CONFIG_BLK_DEV_INTEGRITY */ static inline int blk_rq_count_integrity_sg(struct request_queue *q, @@ -167,9 +166,10 @@ static inline int blk_integrity_rq(struct request *rq) return 0; }
-static inline struct bio_vec *rq_integrity_vec(struct request *rq) +static inline struct bio_vec rq_integrity_vec(struct request *rq) { - return NULL; + /* the optimizer will remove all calls to this function */ + return (struct bio_vec){ }; } #endif /* CONFIG_BLK_DEV_INTEGRITY */ #endif /* _LINUX_BLK_INTEGRITY_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 0283cf366c2a..93ac1a859d69 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -629,6 +629,7 @@ struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; + struct list_head i_lru; /* inode LRU list */ kgid_t i_gid; unsigned int i_flags;
@@ -690,7 +691,6 @@ struct inode { u16 i_wb_frn_avg_time; u16 i_wb_frn_history; #endif - struct list_head i_lru; /* inode LRU list */ struct list_head i_sb_list; struct list_head i_wb_list; /* backing dev writeback list */ union { diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 942a587bb97e..677aea20d3e1 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2126,6 +2126,8 @@
#define PCI_VENDOR_ID_CHELSIO 0x1425
+#define PCI_VENDOR_ID_EDIMAX 0x1432 + #define PCI_VENDOR_ID_ADLINK 0x144a
#define PCI_VENDOR_ID_SAMSUNG 0x144d diff --git a/include/linux/profile.h b/include/linux/profile.h index 04ae5ebcb637..cb365a751861 100644 --- a/include/linux/profile.h +++ b/include/linux/profile.h @@ -11,7 +11,6 @@
#define CPU_PROFILING 1 #define SCHED_PROFILING 2 -#define SLEEP_PROFILING 3 #define KVM_PROFILING 4
struct proc_dir_entry; diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index dfd2399f2cde..61cb3de236af 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -209,7 +209,6 @@ void synchronize_rcu_tasks_rude(void);
#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false) void exit_tasks_rcu_start(void); -void exit_tasks_rcu_stop(void); void exit_tasks_rcu_finish(void); #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ #define rcu_tasks_classic_qs(t, preempt) do { } while (0) @@ -218,7 +217,6 @@ void exit_tasks_rcu_finish(void); #define call_rcu_tasks call_rcu #define synchronize_rcu_tasks synchronize_rcu static inline void exit_tasks_rcu_start(void) { } -static inline void exit_tasks_rcu_stop(void) { } static inline void exit_tasks_rcu_finish(void) { } #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 9df3e2973626..9435185c10ef 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -880,7 +880,6 @@ do { \ struct perf_event;
DECLARE_PER_CPU(struct pt_regs, perf_trace_regs); -DECLARE_PER_CPU(int, bpf_kprobe_override);
extern int perf_trace_init(struct perf_event *event); extern void perf_trace_destroy(struct perf_event *event); diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index d1d7825318c3..6c395a2600e8 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -56,7 +56,6 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, unsigned int thlen = 0; unsigned int p_off = 0; unsigned int ip_proto; - u64 ret, remainder, gso_size;
if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { @@ -99,16 +98,6 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, u32 off = __virtio16_to_cpu(little_endian, hdr->csum_offset); u32 needed = start + max_t(u32, thlen, off + sizeof(__sum16));
- if (hdr->gso_size) { - gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size); - ret = div64_u64_rem(skb->len, gso_size, &remainder); - if (!(ret && (hdr->gso_size > needed) && - ((remainder > needed) || (remainder == 0)))) { - return -EINVAL; - } - skb_shinfo(skb)->tx_flags |= SKBFL_SHARED_FRAG; - } - if (!pskb_may_pull(skb, needed)) return -EINVAL;
@@ -182,6 +171,11 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, if (gso_type != SKB_GSO_UDP_L4) return -EINVAL; break; + case SKB_GSO_TCPV4: + case SKB_GSO_TCPV6: + if (skb->csum_offset != offsetof(struct tcphdr, check)) + return -EINVAL; + break; }
/* Kernel has a special handling for GSO_BY_FRAGS. */ diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h index dc627ebf01df..347959585deb 100644 --- a/include/sound/cs35l56.h +++ b/include/sound/cs35l56.h @@ -267,13 +267,23 @@ struct cs35l56_base { bool fw_patched; bool secured; bool can_hibernate; - bool fw_owns_asp1; bool cal_data_valid; s8 cal_index; struct cirrus_amp_cal_data cal_data; struct gpio_desc *reset_gpio; };
+/* Temporary to avoid a build break with the HDA driver */ +static inline int cs35l56_force_sync_asp1_registers_from_cache(struct cs35l56_base *cs35l56_base) +{ + return 0; +} + +static inline bool cs35l56_is_otp_register(unsigned int reg) +{ + return (reg >> 16) == 3; +} + extern struct regmap_config cs35l56_regmap_i2c; extern struct regmap_config cs35l56_regmap_spi; extern struct regmap_config cs35l56_regmap_sdw; @@ -284,8 +294,6 @@ extern const char * const cs35l56_tx_input_texts[CS35L56_NUM_INPUT_SRC]; extern const unsigned int cs35l56_tx_input_values[CS35L56_NUM_INPUT_SRC];
int cs35l56_set_patch(struct cs35l56_base *cs35l56_base); -int cs35l56_init_asp1_regs_for_driver_control(struct cs35l56_base *cs35l56_base); -int cs35l56_force_sync_asp1_registers_from_cache(struct cs35l56_base *cs35l56_base); int cs35l56_mbox_send(struct cs35l56_base *cs35l56_base, unsigned int command); int cs35l56_firmware_shutdown(struct cs35l56_base *cs35l56_base); int cs35l56_wait_for_firmware_boot(struct cs35l56_base *cs35l56_base); diff --git a/io_uring/net.c b/io_uring/net.c index cf742bdd2a93..09bb82bc209a 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -591,17 +591,18 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags) .iovs = &kmsg->fast_iov, .max_len = INT_MAX, .nr_iovs = 1, - .mode = KBUF_MODE_EXPAND, };
if (kmsg->free_iov) { arg.nr_iovs = kmsg->free_iov_nr; arg.iovs = kmsg->free_iov; - arg.mode |= KBUF_MODE_FREE; + arg.mode = KBUF_MODE_FREE; }
if (!(sr->flags & IORING_RECVSEND_BUNDLE)) arg.nr_iovs = 1; + else + arg.mode |= KBUF_MODE_EXPAND;
ret = io_buffers_select(req, &arg, issue_flags); if (unlikely(ret < 0)) @@ -613,6 +614,7 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags) if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) { kmsg->free_iov_nr = ret; kmsg->free_iov = arg.iovs; + req->flags |= REQ_F_NEED_CLEANUP; } }
@@ -1084,6 +1086,7 @@ static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) { kmsg->free_iov_nr = ret; kmsg->free_iov = arg.iovs; + req->flags |= REQ_F_NEED_CLEANUP; } } else { void __user *buf; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 6b422c275f78..a8845cc299fe 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -7716,6 +7716,13 @@ static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; int err;
+ if (reg->type != PTR_TO_STACK && reg->type != CONST_PTR_TO_DYNPTR) { + verbose(env, + "arg#%d expected pointer to stack or const struct bpf_dynptr\n", + regno); + return -EINVAL; + } + /* MEM_UNINIT and MEM_RDONLY are exclusive, when applied to an * ARG_PTR_TO_DYNPTR (or ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_*): */ @@ -9465,6 +9472,10 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog, return -EINVAL; } } else if (arg->arg_type == (ARG_PTR_TO_DYNPTR | MEM_RDONLY)) { + ret = check_func_arg_reg_off(env, reg, regno, ARG_PTR_TO_DYNPTR); + if (ret) + return ret; + ret = process_dynptr_func(env, regno, -1, arg->arg_type, 0); if (ret) return ret; @@ -11958,12 +11969,6 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ enum bpf_arg_type dynptr_arg_type = ARG_PTR_TO_DYNPTR; int clone_ref_obj_id = 0;
- if (reg->type != PTR_TO_STACK && - reg->type != CONST_PTR_TO_DYNPTR) { - verbose(env, "arg#%d expected pointer to stack or dynptr_ptr\n", i); - return -EINVAL; - } - if (reg->type == CONST_PTR_TO_DYNPTR) dynptr_arg_type |= MEM_RDONLY;
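
Taken together, these two verifier hunks hoist the register-type check out of a single call site and into process_dynptr_func() itself, so every path that handles a dynptr argument performs the same validation. A schematic of the refactor, with all names hypothetical:

#include <stdio.h>

enum reg_type { PTR_TO_STACK, CONST_PTR_TO_DYNPTR, SCALAR_VALUE };

/* After the change the callee validates, so no caller can forget to. */
static int process_dynptr(enum reg_type t)
{
    if (t != PTR_TO_STACK && t != CONST_PTR_TO_DYNPTR) {
        fprintf(stderr, "expected pointer to stack or dynptr\n");
        return -22; /* -EINVAL */
    }
    /* ... type-specific dynptr handling would go here ... */
    return 0;
}

int main(void)
{
    /* Every call site now inherits the same check. */
    printf("%d\n", process_dynptr(PTR_TO_STACK)); /* 0   */
    printf("%d\n", process_dynptr(SCALAR_VALUE)); /* -22 */
    return 0;
}
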
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 07e99c936ba5..1dee88ba0ae4 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -530,6 +530,7 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node, flags = IRQD_AFFINITY_MANAGED | IRQD_MANAGED_SHUTDOWN; } + flags |= IRQD_AFFINITY_SET; mask = &affinity->mask; node = cpu_to_node(cpumask_first(mask)); affinity++; diff --git a/kernel/jump_label.c b/kernel/jump_label.c index 1f05a19918f4..c6ac0d0377d7 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -231,7 +231,7 @@ void static_key_disable_cpuslocked(struct static_key *key) }
jump_label_lock(); - if (atomic_cmpxchg(&key->enabled, 1, 0)) + if (atomic_cmpxchg(&key->enabled, 1, 0) == 1) jump_label_update(key); jump_label_unlock(); } @@ -284,7 +284,7 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key) return;
guard(mutex)(&jump_label_mutex); - if (atomic_cmpxchg(&key->enabled, 1, 0)) + if (atomic_cmpxchg(&key->enabled, 1, 0) == 1) jump_label_update(key); else WARN_ON_ONCE(!static_key_slow_try_dec(key)); diff --git a/kernel/kcov.c b/kernel/kcov.c index f0a69d402066..274b6b7c718d 100644 --- a/kernel/kcov.c +++ b/kernel/kcov.c @@ -161,6 +161,15 @@ static void kcov_remote_area_put(struct kcov_remote_area *area, kmsan_unpoison_memory(&area->list, sizeof(area->list)); }
+/* + * Unlike in_serving_softirq(), this function returns false when called during + * a hardirq or an NMI that happened in the softirq context. + */ +static inline bool in_softirq_really(void) +{ + return in_serving_softirq() && !in_hardirq() && !in_nmi(); +} + static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t) { unsigned int mode; @@ -170,7 +179,7 @@ static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_stru * so we ignore code executed in interrupts, unless we are in a remote * coverage collection section in a softirq. */ - if (!in_task() && !(in_serving_softirq() && t->kcov_softirq)) + if (!in_task() && !(in_softirq_really() && t->kcov_softirq)) return false; mode = READ_ONCE(t->kcov_mode); /* @@ -849,7 +858,7 @@ void kcov_remote_start(u64 handle)
if (WARN_ON(!kcov_check_handle(handle, true, true, true))) return; - if (!in_task() && !in_serving_softirq()) + if (!in_task() && !in_softirq_really()) return;
local_lock_irqsave(&kcov_percpu_data.lock, flags); @@ -991,7 +1000,7 @@ void kcov_remote_stop(void) int sequence; unsigned long flags;
- if (!in_task() && !in_serving_softirq()) + if (!in_task() && !in_softirq_really()) return;
local_lock_irqsave(&kcov_percpu_data.lock, flags); diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 6a76a8100073..85251c254d8a 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1557,8 +1557,8 @@ static bool is_cfi_preamble_symbol(unsigned long addr) if (lookup_symbol_name(addr, symbuf)) return false;
- return str_has_prefix("__cfi_", symbuf) || - str_has_prefix("__pfx_", symbuf); + return str_has_prefix(symbuf, "__cfi_") || + str_has_prefix(symbuf, "__pfx_"); }
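
The bug being fixed is argument order: str_has_prefix(str, prefix) returns the length of @prefix when @str starts with it, so the old call asked whether the literal "__cfi_" starts with the looked-up symbol name, which effectively never holds. A self-contained illustration:

#include <stdio.h>
#include <string.h>

/* Same contract as the kernel helper: length of prefix on match, else 0. */
static size_t str_has_prefix(const char *str, const char *prefix)
{
    size_t len = strlen(prefix);

    return strncmp(str, prefix, len) == 0 ? len : 0;
}

int main(void)
{
    const char *sym = "__cfi_do_exit";

    printf("%zu\n", str_has_prefix(sym, "__cfi_")); /* 6: correct     */
    printf("%zu\n", str_has_prefix("__cfi_", sym)); /* 0: the old bug */
    return 0;
}
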
static int check_kprobe_address_safe(struct kprobe *p, diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h index f5a36e67b593..ac2e22502741 100644 --- a/kernel/locking/qspinlock_paravirt.h +++ b/kernel/locking/qspinlock_paravirt.h @@ -357,7 +357,7 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev) static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node) { struct pv_node *pn = (struct pv_node *)node; - enum vcpu_state old = vcpu_halted; + u8 old = vcpu_halted; /* * If the vCPU is indeed halted, advance its state to match that of * pv_wait_node(). If OTOH this fails, the vCPU was running and will diff --git a/kernel/module/main.c b/kernel/module/main.c index d18a94b973e1..3f9da537024a 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -3101,7 +3101,7 @@ static bool idempotent(struct idempotent *u, const void *cookie) struct idempotent *existing; bool first;
- u->ret = 0; + u->ret = -EINTR; u->cookie = cookie; init_completion(&u->complete);
@@ -3137,7 +3137,7 @@ static int idempotent_complete(struct idempotent *u, int ret) hlist_for_each_entry_safe(pos, next, head, entry) { if (pos->cookie != cookie) continue; - hlist_del(&pos->entry); + hlist_del_init(&pos->entry); pos->ret = ret; complete(&pos->complete); } @@ -3145,6 +3145,28 @@ static int idempotent_complete(struct idempotent *u, int ret) return ret; }
+/* + * Wait for the idempotent worker. + * + * If we get interrupted, we need to remove ourselves from the + * idempotent list, and the completion may still come in. + * + * The 'idem_lock' protects against the race, and 'idem.ret' was + * initialized to -EINTR and is thus always the right return + * value even if the idempotent work then completes between + * the wait_for_completion and the cleanup. + */ +static int idempotent_wait_for_completion(struct idempotent *u) +{ + if (wait_for_completion_interruptible(&u->complete)) { + spin_lock(&idem_lock); + if (!hlist_unhashed(&u->entry)) + hlist_del(&u->entry); + spin_unlock(&idem_lock); + } + return u->ret; +} + static int init_module_from_file(struct file *f, const char __user * uargs, int flags) { struct load_info info = { }; @@ -3180,15 +3202,16 @@ static int idempotent_init_module(struct file *f, const char __user * uargs, int if (!f || !(f->f_mode & FMODE_READ)) return -EBADF;
- /* See if somebody else is doing the operation? */ - if (idempotent(&idem, file_inode(f))) { - wait_for_completion(&idem.complete); - return idem.ret; + /* Are we the winners of the race and get to do this? */ + if (!idempotent(&idem, file_inode(f))) { + int ret = init_module_from_file(f, uargs, flags); + return idempotent_complete(&idem, ret); }
- /* Otherwise, we'll do it and complete others */ - return idempotent_complete(&idem, - init_module_from_file(f, uargs, flags)); + /* + * Somebody else won the race and is loading the module. + */ + return idempotent_wait_for_completion(&idem); }
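
Two details make the interruptible wait above safe: u->ret is pre-initialized to -EINTR, and idempotent_complete() now uses hlist_del_init() so an interrupted waiter can tell via hlist_unhashed() whether it was already removed. A userspace sketch of the same publish-or-unhook pattern using a condition variable; all names are hypothetical, this is not the kernel API:

#include <pthread.h>
#include <stdbool.h>

struct waiter {
    int ret;      /* pre-set to the "interrupted" result, like idem.ret */
    bool on_list; /* stands in for !hlist_unhashed() */
    bool done;
    pthread_cond_t cond;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Completer: publish the result and take the waiter off the list. */
static void complete_waiter(struct waiter *w, int ret)
{
    pthread_mutex_lock(&list_lock);
    if (w->on_list) {
        w->on_list = false;
        w->ret = ret;
        w->done = true;
        pthread_cond_signal(&w->cond);
    }
    pthread_mutex_unlock(&list_lock);
}

/* Waiter: on interruption, unhook under the lock; w->ret is already -EINTR. */
static int wait_for_result(struct waiter *w, bool interrupted)
{
    pthread_mutex_lock(&list_lock);
    while (!w->done && !interrupted)
        pthread_cond_wait(&w->cond, &list_lock);
    if (!w->done && w->on_list)
        w->on_list = false; /* like hlist_del() under idem_lock */
    pthread_mutex_unlock(&list_lock);
    return w->ret;          /* -EINTR unless the completer got there first */
}

int main(void)
{
    struct waiter w = {
        .ret = -4 /* -EINTR */, .on_list = true,
        .cond = PTHREAD_COND_INITIALIZER,
    };

    complete_waiter(&w, 0);
    return wait_for_result(&w, false); /* 0: the completer won */
}
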
SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags) diff --git a/kernel/padata.c b/kernel/padata.c index 53f4bc912712..0fa6c2895460 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -517,6 +517,13 @@ void __init padata_do_multithreaded(struct padata_mt_job *job) ps.chunk_size = max(ps.chunk_size, job->min_chunk); ps.chunk_size = roundup(ps.chunk_size, job->align);
+ /* + * chunk_size can be 0 if the caller sets min_chunk to 0. So force it + * to at least 1 to prevent divide-by-0 panic in padata_mt_helper(). + */ + if (!ps.chunk_size) + ps.chunk_size = 1U; + list_for_each_entry(pw, &works, pw_list) if (job->numa_aware) { int old_node = atomic_read(&last_used_nid);
diff --git a/kernel/profile.c b/kernel/profile.c index 2b775cc5c28f..f2bb82c69f72 100644 --- a/kernel/profile.c +++ b/kernel/profile.c @@ -57,20 +57,11 @@ static DEFINE_MUTEX(profile_flip_mutex); int profile_setup(char *str) { static const char schedstr[] = "schedule"; - static const char sleepstr[] = "sleep"; static const char kvmstr[] = "kvm"; const char *select = NULL; int par;
- if (!strncmp(str, sleepstr, strlen(sleepstr))) { -#ifdef CONFIG_SCHEDSTATS - force_schedstat_enabled(); - prof_on = SLEEP_PROFILING; - select = sleepstr; -#else - pr_warn("kernel sleep profiling requires CONFIG_SCHEDSTATS\n"); -#endif /* CONFIG_SCHEDSTATS */ - } else if (!strncmp(str, schedstr, strlen(schedstr))) { + if (!strncmp(str, schedstr, strlen(schedstr))) { prof_on = SCHED_PROFILING; select = schedstr; } else if (!strncmp(str, kvmstr, strlen(kvmstr))) { diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 807fbf6123a7..251cead74460 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -2626,7 +2626,7 @@ static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp) spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); rfcpp = rfp->rcu_fwd_cb_tail; rfp->rcu_fwd_cb_tail = &rfcp->rfc_next; - WRITE_ONCE(*rfcpp, rfcp); + smp_store_release(rfcpp, rfcp); WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1); i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV)); if (i >= ARRAY_SIZE(rfp->n_launders_hist)) diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 098e82bcc427..ba3440a45b6d 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -858,7 +858,7 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp) // not know to synchronize with this RCU Tasks grace period) have // completed exiting. The synchronize_rcu() in rcu_tasks_postgp() // will take care of any tasks stuck in the non-preemptible region -// of do_exit() following its call to exit_tasks_rcu_stop(). +// of do_exit() following its call to exit_tasks_rcu_finish(). // check_all_holdout_tasks(), repeatedly until holdout list is empty: // Scans the holdout list, attempting to identify a quiescent state // for each task on the list. If there is a quiescent state, the @@ -1220,7 +1220,7 @@ void exit_tasks_rcu_start(void) * Remove the task from the "yet another list" because do_exit() is now * non-preemptible, allowing synchronize_rcu() to wait beyond this point. */ -void exit_tasks_rcu_stop(void) +void exit_tasks_rcu_finish(void) { unsigned long flags; struct rcu_tasks_percpu *rtpcp; @@ -1231,22 +1231,12 @@ void exit_tasks_rcu_stop(void) raw_spin_lock_irqsave_rcu_node(rtpcp, flags); list_del_init(&t->rcu_tasks_exit_list); raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); -}
-/* - * Contribute to protect against tasklist scan blind spot while the - * task is exiting and may be removed from the tasklist. See - * corresponding synchronize_srcu() for further details. - */ -void exit_tasks_rcu_finish(void) -{ - exit_tasks_rcu_stop(); - exit_tasks_rcu_finish_trace(current); + exit_tasks_rcu_finish_trace(t); }
#else /* #ifdef CONFIG_TASKS_RCU */ void exit_tasks_rcu_start(void) { } -void exit_tasks_rcu_stop(void) { } void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); } #endif /* #else #ifdef CONFIG_TASKS_RCU */
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 28c7031711a3..63fb007beeaf 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -5110,11 +5110,15 @@ void rcutree_migrate_callbacks(int cpu) struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); bool needwake;
- if (rcu_rdp_is_offloaded(rdp) || - rcu_segcblist_empty(&rdp->cblist)) - return; /* No callbacks to migrate. */ + if (rcu_rdp_is_offloaded(rdp)) + return;
raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags); + if (rcu_segcblist_empty(&rdp->cblist)) { + raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags); + return; /* No callbacks to migrate. */ + } + WARN_ON_ONCE(rcu_rdp_cpu_online(rdp)); rcu_barrier_entrain(rdp); my_rdp = this_cpu_ptr(&rcu_data); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index ebf21373f663..3e84a3b7b7bb 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -9604,6 +9604,30 @@ void set_rq_offline(struct rq *rq) } }
+static inline void sched_set_rq_online(struct rq *rq, int cpu) +{ + struct rq_flags rf; + + rq_lock_irqsave(rq, &rf); + if (rq->rd) { + BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); + set_rq_online(rq); + } + rq_unlock_irqrestore(rq, &rf); +} + +static inline void sched_set_rq_offline(struct rq *rq, int cpu) +{ + struct rq_flags rf; + + rq_lock_irqsave(rq, &rf); + if (rq->rd) { + BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); + set_rq_offline(rq); + } + rq_unlock_irqrestore(rq, &rf); +} + /* * used to mark begin/end of suspend/resume: */ @@ -9654,10 +9678,25 @@ static int cpuset_cpu_inactive(unsigned int cpu) return 0; }
+static inline void sched_smt_present_inc(int cpu) +{ +#ifdef CONFIG_SCHED_SMT + if (cpumask_weight(cpu_smt_mask(cpu)) == 2) + static_branch_inc_cpuslocked(&sched_smt_present); +#endif +} + +static inline void sched_smt_present_dec(int cpu) +{ +#ifdef CONFIG_SCHED_SMT + if (cpumask_weight(cpu_smt_mask(cpu)) == 2) + static_branch_dec_cpuslocked(&sched_smt_present); +#endif +} + int sched_cpu_activate(unsigned int cpu) { struct rq *rq = cpu_rq(cpu); - struct rq_flags rf;
/* * Clear the balance_push callback and prepare to schedule @@ -9665,13 +9704,10 @@ int sched_cpu_activate(unsigned int cpu) */ balance_push_set(cpu, false);
-#ifdef CONFIG_SCHED_SMT /* * When going up, increment the number of cores with SMT present. */ - if (cpumask_weight(cpu_smt_mask(cpu)) == 2) - static_branch_inc_cpuslocked(&sched_smt_present); -#endif + sched_smt_present_inc(cpu); set_cpu_active(cpu, true);
if (sched_smp_initialized) { @@ -9689,12 +9725,7 @@ int sched_cpu_activate(unsigned int cpu) * 2) At runtime, if cpuset_cpu_active() fails to rebuild the * domains. */ - rq_lock_irqsave(rq, &rf); - if (rq->rd) { - BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); - set_rq_online(rq); - } - rq_unlock_irqrestore(rq, &rf); + sched_set_rq_online(rq, cpu);
return 0; } @@ -9702,7 +9733,6 @@ int sched_cpu_activate(unsigned int cpu) int sched_cpu_deactivate(unsigned int cpu) { struct rq *rq = cpu_rq(cpu); - struct rq_flags rf; int ret;
/* @@ -9733,20 +9763,14 @@ int sched_cpu_deactivate(unsigned int cpu) */ synchronize_rcu();
- rq_lock_irqsave(rq, &rf); - if (rq->rd) { - BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); - set_rq_offline(rq); - } - rq_unlock_irqrestore(rq, &rf); + sched_set_rq_offline(rq, cpu);
-#ifdef CONFIG_SCHED_SMT /* * When going down, decrement the number of cores with SMT present. */ - if (cpumask_weight(cpu_smt_mask(cpu)) == 2) - static_branch_dec_cpuslocked(&sched_smt_present); + sched_smt_present_dec(cpu);
+#ifdef CONFIG_SCHED_SMT sched_core_cpu_deactivate(cpu); #endif
@@ -9756,6 +9780,8 @@ int sched_cpu_deactivate(unsigned int cpu) sched_update_numa(cpu, false); ret = cpuset_cpu_inactive(cpu); if (ret) { + sched_smt_present_inc(cpu); + sched_set_rq_online(rq, cpu); balance_push_set(cpu, false); set_cpu_active(cpu, true); sched_update_numa(cpu, true); diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index aa48b2ec879d..4feef0d4e449 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -582,6 +582,12 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev, }
stime = mul_u64_u64_div_u64(stime, rtime, stime + utime); + /* + * Because mul_u64_u64_div_u64() can approximate on some + * architectures, enforce the constraint that a*b/(b+c) <= a. + */ + if (unlikely(stime > rtime)) + stime = rtime;
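
A worked example of why the clamp is needed: mathematically stime * rtime / (stime + utime) <= rtime, but mul_u64_u64_div_u64() may only approximate the 128-bit intermediate on some architectures. A userspace version using exact __int128 arithmetic (a GCC/Clang extension) shows the intended bound:

#include <stdint.h>
#include <stdio.h>

/* Exact a * b / c via a 128-bit intermediate (what the helper approximates). */
static uint64_t mul_div(uint64_t a, uint64_t b, uint64_t c)
{
    return (uint64_t)(((unsigned __int128)a * b) / c);
}

int main(void)
{
    uint64_t stime = 3000000000ULL; /* raw system time, ns */
    uint64_t utime = 1000000000ULL; /* raw user time, ns   */
    uint64_t rtime = 2000000000ULL; /* scheduler runtime   */

    uint64_t s = mul_div(stime, rtime, stime + utime);

    /*
     * Exact math: 3e9 * 2e9 / 4e9 = 1.5e9 <= rtime. An approximating
     * helper could land above rtime, hence the kernel's clamp:
     */
    if (s > rtime)
        s = rtime;
    printf("%llu\n", (unsigned long long)s);
    return 0;
}
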
update: /* diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c index 78e48f5426ee..eb0cdcd4d921 100644 --- a/kernel/sched/stats.c +++ b/kernel/sched/stats.c @@ -92,16 +92,6 @@ void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
trace_sched_stat_blocked(p, delta);
- /* - * Blocking time is in units of nanosecs, so shift by - * 20 to get a milliseconds-range estimation of the - * amount of time that the task spent sleeping: - */ - if (unlikely(prof_on == SLEEP_PROFILING)) { - profile_hits(SLEEP_PROFILING, - (void *)get_wchan(p), - delta >> 20); - } account_scheduler_latency(p, delta >> 10, 0); } } diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index d25ba49e313c..d0538a75f4c6 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -246,7 +246,7 @@ static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow,
wd_delay = cycles_to_nsec_safe(watchdog, *wdnow, wd_end); if (wd_delay <= WATCHDOG_MAX_SKEW) { - if (nretries > 1 || nretries >= max_retries) { + if (nretries > 1 && nretries >= max_retries) { pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n", smp_processor_id(), watchdog->name, nretries); } diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 406dccb79c2b..8d2dd214ec68 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -727,17 +727,16 @@ static inline void process_adjtimex_modes(const struct __kernel_timex *txc, }
if (txc->modes & ADJ_MAXERROR) - time_maxerror = txc->maxerror; + time_maxerror = clamp(txc->maxerror, 0, NTP_PHASE_LIMIT);
if (txc->modes & ADJ_ESTERROR) - time_esterror = txc->esterror; + time_esterror = clamp(txc->esterror, 0, NTP_PHASE_LIMIT);
if (txc->modes & ADJ_TIMECONST) { - time_constant = txc->constant; + time_constant = clamp(txc->constant, 0, MAXTC); if (!(time_status & STA_NANO)) time_constant += 4; - time_constant = min(time_constant, (long)MAXTC); - time_constant = max(time_constant, 0l); + time_constant = clamp(time_constant, 0, MAXTC); }
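
clamp(val, lo, hi) is simply max(lo, min(val, hi)); the rewrite folds the old min/max pair into one call and bounds txc->constant both before and after the '+4' bias applied when STA_NANO is clear. Open-coded for reference (MAXTC is 10 in the NTP code):

#include <stdio.h>

#define MAXTC 10

static long clamp_long(long v, long lo, long hi)
{
    return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
    long constant = 1000; /* hypothetical txc->constant from userspace */
    long time_constant;
    int sta_nano = 0;

    time_constant = clamp_long(constant, 0, MAXTC);      /* bound the input */
    if (!sta_nano)
        time_constant += 4;
    time_constant = clamp_long(time_constant, 0, MAXTC); /* re-bound */
    printf("%ld\n", time_constant);                      /* 10, not 1004 */
    return 0;
}
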
if (txc->modes & ADJ_TAI && diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index b4843099a8da..ed58eebb4e8f 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -1141,7 +1141,6 @@ void tick_broadcast_switch_to_oneshot(void) #ifdef CONFIG_HOTPLUG_CPU void hotplug_cpu__broadcast_tick_pull(int deadcpu) { - struct tick_device *td = this_cpu_ptr(&tick_cpu_device); struct clock_event_device *bc; unsigned long flags;
@@ -1167,6 +1166,8 @@ void hotplug_cpu__broadcast_tick_pull(int deadcpu) * device to avoid the starvation. */ if (tick_check_broadcast_expired()) { + struct tick_device *td = this_cpu_ptr(&tick_cpu_device); + cpumask_clear_cpu(smp_processor_id(), tick_broadcast_force_mask); tick_program_event(td->evtdev->next_event, 1); } diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 4e18db1819f8..0381366c541e 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -2479,7 +2479,7 @@ int do_adjtimex(struct __kernel_timex *txc) clock_set |= timekeeping_advance(TK_ADV_FREQ);
if (clock_set) - clock_was_set(CLOCK_REALTIME); + clock_was_set(CLOCK_SET_WALL);
ntp_notify_cmos_timer();
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 749a182dab48..8edab43580d5 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -1573,6 +1573,29 @@ static inline void *event_file_data(struct file *filp) extern struct mutex event_mutex; extern struct list_head ftrace_events;
+/* + * When the trace_event_file is the filp->i_private pointer, + * it must be taken under the event_mutex lock, and then checked + * if the EVENT_FILE_FL_FREED flag is set. If it is, then the + * data pointed to by the trace_event_file can not be trusted. + * + * Use the event_file_file() to access the trace_event_file from + * the filp the first time under the event_mutex and check for + * NULL. If it is needed to be retrieved again and the event_mutex + * is still held, then the event_file_data() can be used and it + * is guaranteed to be valid. + */ +static inline struct trace_event_file *event_file_file(struct file *filp) +{ + struct trace_event_file *file; + + lockdep_assert_held(&event_mutex); + file = READ_ONCE(file_inode(filp)->i_private); + if (!file || file->flags & EVENT_FILE_FL_FREED) + return NULL; + return file; +} + extern const struct file_operations event_trigger_fops; extern const struct file_operations event_hist_fops; extern const struct file_operations event_hist_debug_fops; diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 6ef29eba90ce..f08fbaf8cad6 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -1386,12 +1386,12 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt, char buf[4] = "0";
mutex_lock(&event_mutex); - file = event_file_data(filp); + file = event_file_file(filp); if (likely(file)) flags = file->flags; mutex_unlock(&event_mutex);
- if (!file || flags & EVENT_FILE_FL_FREED) + if (!file) return -ENODEV;
if (flags & EVENT_FILE_FL_ENABLED && @@ -1424,8 +1424,8 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, case 1: ret = -ENODEV; mutex_lock(&event_mutex); - file = event_file_data(filp); - if (likely(file && !(file->flags & EVENT_FILE_FL_FREED))) { + file = event_file_file(filp); + if (likely(file)) { ret = tracing_update_buffers(file->tr); if (ret < 0) { mutex_unlock(&event_mutex); @@ -1540,7 +1540,8 @@ enum {
static void *f_next(struct seq_file *m, void *v, loff_t *pos) { - struct trace_event_call *call = event_file_data(m->private); + struct trace_event_file *file = event_file_data(m->private); + struct trace_event_call *call = file->event_call; struct list_head *common_head = &ftrace_common_fields; struct list_head *head = trace_get_fields(call); struct list_head *node = v; @@ -1572,7 +1573,8 @@ static void *f_next(struct seq_file *m, void *v, loff_t *pos)
static int f_show(struct seq_file *m, void *v) { - struct trace_event_call *call = event_file_data(m->private); + struct trace_event_file *file = event_file_data(m->private); + struct trace_event_call *call = file->event_call; struct ftrace_event_field *field; const char *array_descriptor;
@@ -1627,12 +1629,14 @@ static int f_show(struct seq_file *m, void *v)
static void *f_start(struct seq_file *m, loff_t *pos) { + struct trace_event_file *file; void *p = (void *)FORMAT_HEADER; loff_t l = 0;
/* ->stop() is called even if ->start() fails */ mutex_lock(&event_mutex); - if (!event_file_data(m->private)) + file = event_file_file(m->private); + if (!file) return ERR_PTR(-ENODEV);
while (l < *pos && p) @@ -1706,8 +1710,8 @@ event_filter_read(struct file *filp, char __user *ubuf, size_t cnt, trace_seq_init(s);
mutex_lock(&event_mutex); - file = event_file_data(filp); - if (file && !(file->flags & EVENT_FILE_FL_FREED)) + file = event_file_file(filp); + if (file) print_event_filter(file, s); mutex_unlock(&event_mutex);
@@ -1736,9 +1740,13 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, return PTR_ERR(buf);
mutex_lock(&event_mutex); - file = event_file_data(filp); - if (file) - err = apply_event_filter(file, buf); + file = event_file_file(filp); + if (file) { + if (file->flags & EVENT_FILE_FL_FREED) + err = -ENODEV; + else + err = apply_event_filter(file, buf); + } mutex_unlock(&event_mutex);
kfree(buf); @@ -2485,7 +2493,6 @@ static int event_callback(const char *name, umode_t *mode, void **data, if (strcmp(name, "format") == 0) { *mode = TRACE_MODE_READ; *fops = &ftrace_event_format_fops; - *data = call; return 1; }
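
All of the call-site conversions in this file follow the pattern documented in trace.h above: take event_mutex, fetch the file through event_file_file() so the EVENT_FILE_FL_FREED test folds into the NULL check, and only then dereference. Schematically, with simplified types and a pthread mutex standing in for event_mutex:

#include <pthread.h>
#include <stddef.h>

#define EVENT_FILE_FL_FREED (1 << 0)

struct trace_event_file { unsigned int flags; };

static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Returns NULL for both "no file" and "file already freed". */
static struct trace_event_file *
event_file_file(struct trace_event_file *i_private)
{
    if (!i_private || (i_private->flags & EVENT_FILE_FL_FREED))
        return NULL;
    return i_private;
}

static int show_flags(struct trace_event_file *i_private, unsigned int *out)
{
    struct trace_event_file *file;
    int ret = -19; /* -ENODEV */

    pthread_mutex_lock(&event_mutex);
    file = event_file_file(i_private);
    if (file) {
        *out = file->flags; /* safe: checked under the mutex */
        ret = 0;
    }
    pthread_mutex_unlock(&event_mutex);
    return ret;
}

int main(void)
{
    struct trace_event_file f = { .flags = EVENT_FILE_FL_FREED };
    unsigned int flags;

    return show_flags(&f, &flags) == -19 ? 0 : 1; /* freed file: -ENODEV */
}
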
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 6ece1308d36a..5f9119eb7c67 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -5601,7 +5601,7 @@ static int hist_show(struct seq_file *m, void *v)
mutex_lock(&event_mutex);
- event_file = event_file_data(m->private); + event_file = event_file_file(m->private); if (unlikely(!event_file)) { ret = -ENODEV; goto out_unlock; @@ -5880,7 +5880,7 @@ static int hist_debug_show(struct seq_file *m, void *v)
mutex_lock(&event_mutex);
- event_file = event_file_data(m->private); + event_file = event_file_file(m->private); if (unlikely(!event_file)) { ret = -ENODEV; goto out_unlock; diff --git a/kernel/trace/trace_events_inject.c b/kernel/trace/trace_events_inject.c index 8650562bdaa9..a8f076809db4 100644 --- a/kernel/trace/trace_events_inject.c +++ b/kernel/trace/trace_events_inject.c @@ -299,7 +299,7 @@ event_inject_write(struct file *filp, const char __user *ubuf, size_t cnt, strim(buf);
mutex_lock(&event_mutex); - file = event_file_data(filp); + file = event_file_file(filp); if (file) { call = file->event_call; size = parse_entry(buf, call, &entry); diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index 4bec043c8690..a5e3d6acf1e1 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -159,7 +159,7 @@ static void *trigger_start(struct seq_file *m, loff_t *pos)
/* ->stop() is called even if ->start() fails */ mutex_lock(&event_mutex); - event_file = event_file_data(m->private); + event_file = event_file_file(m->private); if (unlikely(!event_file)) return ERR_PTR(-ENODEV);
@@ -213,7 +213,7 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file)
mutex_lock(&event_mutex);
- if (unlikely(!event_file_data(file))) { + if (unlikely(!event_file_file(file))) { mutex_unlock(&event_mutex); return -ENODEV; } @@ -293,7 +293,7 @@ static ssize_t event_trigger_regex_write(struct file *file, strim(buf);
mutex_lock(&event_mutex); - event_file = event_file_data(file); + event_file = event_file_file(file); if (unlikely(!event_file)) { mutex_unlock(&event_mutex); kfree(buf); diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c index a4dcf0f24352..3a56e7c8aa4f 100644 --- a/kernel/trace/tracing_map.c +++ b/kernel/trace/tracing_map.c @@ -454,7 +454,7 @@ static struct tracing_map_elt *get_free_elt(struct tracing_map *map) struct tracing_map_elt *elt = NULL; int idx;
- idx = atomic_inc_return(&map->next_elt); + idx = atomic_fetch_add_unless(&map->next_elt, 1, map->max_elts); if (idx < map->max_elts) { elt = *(TRACING_MAP_ELT(map->elts, idx)); if (map->ops && map->ops->elt_init) @@ -699,7 +699,7 @@ void tracing_map_clear(struct tracing_map *map) { unsigned int i;
- atomic_set(&map->next_elt, -1); + atomic_set(&map->next_elt, 0); atomic64_set(&map->hits, 0); atomic64_set(&map->drops, 0);
@@ -783,7 +783,7 @@ struct tracing_map *tracing_map_create(unsigned int map_bits,
map->map_bits = map_bits; map->max_elts = (1 << map_bits); - atomic_set(&map->next_elt, -1); + atomic_set(&map->next_elt, 0);
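The tracing_map hunks replace an unbounded atomic_inc_return() with atomic_fetch_add_unless(), which refuses to move past max_elts, and pair it with initializing next_elt to 0 instead of -1. A small userspace model of the semantics (not the kernel implementation):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Model of atomic_fetch_add_unless(v, a, u): add a to *v unless it
     * already equals u; always return the old value. */
    static int fetch_add_unless(atomic_int *v, int a, int u)
    {
            int c = atomic_load(v);

            while (c != u && !atomic_compare_exchange_weak(v, &c, c + a))
                    ;
            return c;
    }

    int main(void)
    {
            atomic_int next_elt = 0;
            const int max_elts = 2;

            for (int i = 0; i < 4; i++) {
                    int idx = fetch_add_unless(&next_elt, 1, max_elts);

                    printf("claim %d -> idx %d (%s)\n", i, idx,
                           idx < max_elts ? "valid slot" : "table full");
            }
            return 0;
    }

With the old code, claims beyond the table size kept incrementing the counter and could eventually wrap back into valid-looking indices; the bounded form simply saturates at max_elts.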
map->map_size = (1 << (map_bits + 1)); map->ops = ops; diff --git a/lib/debugobjects.c b/lib/debugobjects.c index fb12a9bacd2f..7cea91e193a8 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -78,16 +78,17 @@ static bool obj_freeing; /* The number of objs on the global free list */ static int obj_nr_tofree;
-static int debug_objects_maxchain __read_mostly; -static int __maybe_unused debug_objects_maxchecked __read_mostly; -static int debug_objects_fixups __read_mostly; -static int debug_objects_warnings __read_mostly; -static int debug_objects_enabled __read_mostly - = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT; -static int debug_objects_pool_size __read_mostly - = ODEBUG_POOL_SIZE; -static int debug_objects_pool_min_level __read_mostly - = ODEBUG_POOL_MIN_LEVEL; +static int __data_racy debug_objects_maxchain __read_mostly; +static int __data_racy __maybe_unused debug_objects_maxchecked __read_mostly; +static int __data_racy debug_objects_fixups __read_mostly; +static int __data_racy debug_objects_warnings __read_mostly; +static int __data_racy debug_objects_enabled __read_mostly + = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT; +static int __data_racy debug_objects_pool_size __read_mostly + = ODEBUG_POOL_SIZE; +static int __data_racy debug_objects_pool_min_level __read_mostly + = ODEBUG_POOL_MIN_LEVEL; + static const struct debug_obj_descr *descr_test __read_mostly; static struct kmem_cache *obj_cache __ro_after_init;
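The debugobjects hunk tags the statistics counters with __data_racy, documenting that their lockless accesses are tolerated. To my understanding the qualifier is defined roughly like this (an assumption; compiler_types.h is authoritative):

    /* Under KCSAN, force every access to the object to be "marked" by
     * making it volatile; in normal builds the annotation costs nothing. */
    #ifdef __SANITIZE_THREAD__
    # define __data_racy volatile
    #else
    # define __data_racy
    #endif

    static int __data_racy debug_objects_warnings; /* racy reads tolerated */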
diff --git a/mm/list_lru.c b/mm/list_lru.c index 3fd64736bc45..6ac393ea7439 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c @@ -85,6 +85,7 @@ list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx) } #endif /* CONFIG_MEMCG_KMEM */
+/* The caller must ensure the memcg lifetime. */ bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid, struct mem_cgroup *memcg) { @@ -109,14 +110,22 @@ EXPORT_SYMBOL_GPL(list_lru_add);
bool list_lru_add_obj(struct list_lru *lru, struct list_head *item) { + bool ret; int nid = page_to_nid(virt_to_page(item)); - struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ? - mem_cgroup_from_slab_obj(item) : NULL;
- return list_lru_add(lru, item, nid, memcg); + if (list_lru_memcg_aware(lru)) { + rcu_read_lock(); + ret = list_lru_add(lru, item, nid, mem_cgroup_from_slab_obj(item)); + rcu_read_unlock(); + } else { + ret = list_lru_add(lru, item, nid, NULL); + } + + return ret; } EXPORT_SYMBOL_GPL(list_lru_add_obj);
+/* The caller must ensure the memcg lifetime. */ bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid, struct mem_cgroup *memcg) { @@ -139,11 +148,18 @@ EXPORT_SYMBOL_GPL(list_lru_del);
bool list_lru_del_obj(struct list_lru *lru, struct list_head *item) { + bool ret; int nid = page_to_nid(virt_to_page(item)); - struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ? - mem_cgroup_from_slab_obj(item) : NULL;
- return list_lru_del(lru, item, nid, memcg); + if (list_lru_memcg_aware(lru)) { + rcu_read_lock(); + ret = list_lru_del(lru, item, nid, mem_cgroup_from_slab_obj(item)); + rcu_read_unlock(); + } else { + ret = list_lru_del(lru, item, nid, NULL); + } + + return ret; } EXPORT_SYMBOL_GPL(list_lru_del_obj);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 8f2f1bb18c9c..88c7a0861017 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5568,11 +5568,28 @@ static struct cftype mem_cgroup_legacy_files[] = {
#define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1) static DEFINE_IDR(mem_cgroup_idr); +static DEFINE_SPINLOCK(memcg_idr_lock); + +static int mem_cgroup_alloc_id(void) +{ + int ret; + + idr_preload(GFP_KERNEL); + spin_lock(&memcg_idr_lock); + ret = idr_alloc(&mem_cgroup_idr, NULL, 1, MEM_CGROUP_ID_MAX + 1, + GFP_NOWAIT); + spin_unlock(&memcg_idr_lock); + idr_preload_end(); + return ret; +}
static void mem_cgroup_id_remove(struct mem_cgroup *memcg) { if (memcg->id.id > 0) { + spin_lock(&memcg_idr_lock); idr_remove(&mem_cgroup_idr, memcg->id.id); + spin_unlock(&memcg_idr_lock); + memcg->id.id = 0; } } @@ -5706,8 +5723,7 @@ static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent) if (!memcg) return ERR_PTR(error);
- memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, - 1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL); + memcg->id.id = mem_cgroup_alloc_id(); if (memcg->id.id < 0) { error = memcg->id.id; goto fail; @@ -5854,7 +5870,9 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css) * publish it here at the end of onlining. This matches the * regular ID destruction during offlining. */ + spin_lock(&memcg_idr_lock); idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); + spin_unlock(&memcg_idr_lock);
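mem_cgroup_alloc_id() is the standard IDR preload idiom: the sleeping GFP_KERNEL allocation happens outside the spinlock, and the allocation inside the lock uses GFP_NOWAIT, which is served from the per-CPU preload cache. In isolation, with generic names:

    /* Preload idiom: preallocate where sleeping is legal, allocate
     * atomically where it is not. */
    static int alloc_id_locked(struct idr *idr, spinlock_t *lock, void *ptr)
    {
            int id;

            idr_preload(GFP_KERNEL);        /* may sleep */
            spin_lock(lock);
            id = idr_alloc(idr, ptr, 1, 0, GFP_NOWAIT);
            spin_unlock(lock);
            idr_preload_end();

            return id;                      /* new ID, or -errno */
    }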
return 0; offline_kmem: diff --git a/mm/slub.c b/mm/slub.c index 4927edec6a8c..849e8972e2ff 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -4655,6 +4655,9 @@ static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) if (!df.slab) continue;
+ if (kfence_free(df.freelist)) + continue; + do_slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt, _RET_IP_); } while (likely(size)); diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c index 2f26147fdf3c..4e90bd722e7b 100644 --- a/net/bluetooth/hci_sync.c +++ b/net/bluetooth/hci_sync.c @@ -2972,6 +2972,20 @@ static int hci_passive_scan_sync(struct hci_dev *hdev) } else if (hci_is_adv_monitoring(hdev)) { window = hdev->le_scan_window_adv_monitor; interval = hdev->le_scan_int_adv_monitor; + + /* Disable duplicates filter when scanning for advertisement + * monitor for the following reasons. + * + * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm + * controllers ignore RSSI_Sampling_Period when the duplicates + * filter is enabled. + * + * For SW pattern filtering, when we're not doing interleaved + * scanning, it is necessary to disable duplicates filter, + * otherwise hosts can only receive one advertisement and it's + * impossible to know if a peer is still in range. + */ + filter_dups = LE_SCAN_FILTER_DUP_DISABLE; } else { window = hdev->le_scan_window; interval = hdev->le_scan_interval; diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index c3c26bbb5dda..9988ba382b68 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -6774,6 +6774,7 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, bt_cb(skb)->l2cap.psm = psm;
if (!chan->ops->recv(chan, skb)) { + l2cap_chan_unlock(chan); l2cap_chan_put(chan); return; } diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 9a1cb5079a7a..b2ae0d2434d2 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -2045,16 +2045,14 @@ void br_multicast_del_port(struct net_bridge_port *port) { struct net_bridge *br = port->br; struct net_bridge_port_group *pg; - HLIST_HEAD(deleted_head); struct hlist_node *n;
/* Take care of the remaining groups, only perm ones should be left */ spin_lock_bh(&br->multicast_lock); hlist_for_each_entry_safe(pg, n, &port->mglist, mglist) br_multicast_find_del_pg(br, pg); - hlist_move_list(&br->mcast_gc_list, &deleted_head); spin_unlock_bh(&br->multicast_lock); - br_multicast_gc(&deleted_head); + flush_work(&br->mcast_gc_work); br_multicast_port_ctx_deinit(&port->multicast_ctx); free_percpu(port->mcast_stats); } diff --git a/net/core/link_watch.c b/net/core/link_watch.c index 8ec35194bfcb..ab150641142a 100644 --- a/net/core/link_watch.c +++ b/net/core/link_watch.c @@ -148,9 +148,9 @@ static void linkwatch_schedule_work(int urgent) * override the existing timer. */ if (test_bit(LW_URGENT, &linkwatch_flags)) - mod_delayed_work(system_wq, &linkwatch_work, 0); + mod_delayed_work(system_unbound_wq, &linkwatch_work, 0); else - schedule_delayed_work(&linkwatch_work, delay); + queue_delayed_work(system_unbound_wq, &linkwatch_work, delay); }
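The link_watch hunk moves the delayed work from the per-CPU system_wq to system_unbound_wq, so execution is not pinned to the CPU that queued it. The shape of the change, with a hypothetical work item:

    #include <linux/workqueue.h>

    static void lw_fn(struct work_struct *work)
    {
            /* may run on any CPU, which suits latency-tolerant work */
    }
    static DECLARE_DELAYED_WORK(lw_work, lw_fn);

    static void schedule_lw(bool urgent)
    {
            if (urgent)
                    mod_delayed_work(system_unbound_wq, &lw_work, 0);
            else
                    queue_delayed_work(system_unbound_wq, &lw_work, HZ);
    }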
diff --git a/net/ipv4/tcp_ao.c b/net/ipv4/tcp_ao.c index 09c0fa6756b7..9171e1066808 100644 --- a/net/ipv4/tcp_ao.c +++ b/net/ipv4/tcp_ao.c @@ -266,32 +266,49 @@ static void tcp_ao_key_free_rcu(struct rcu_head *head) kfree_sensitive(key); }
-void tcp_ao_destroy_sock(struct sock *sk, bool twsk) +static void tcp_ao_info_free_rcu(struct rcu_head *head) { - struct tcp_ao_info *ao; + struct tcp_ao_info *ao = container_of(head, struct tcp_ao_info, rcu); struct tcp_ao_key *key; struct hlist_node *n;
+ hlist_for_each_entry_safe(key, n, &ao->head, node) { + hlist_del(&key->node); + tcp_sigpool_release(key->tcp_sigpool_id); + kfree_sensitive(key); + } + kfree(ao); + static_branch_slow_dec_deferred(&tcp_ao_needed); +} + +static void tcp_ao_sk_omem_free(struct sock *sk, struct tcp_ao_info *ao) +{ + size_t total_ao_sk_mem = 0; + struct tcp_ao_key *key; + + hlist_for_each_entry(key, &ao->head, node) + total_ao_sk_mem += tcp_ao_sizeof_key(key); + atomic_sub(total_ao_sk_mem, &sk->sk_omem_alloc); +} + +void tcp_ao_destroy_sock(struct sock *sk, bool twsk) +{ + struct tcp_ao_info *ao; + if (twsk) { ao = rcu_dereference_protected(tcp_twsk(sk)->ao_info, 1); - tcp_twsk(sk)->ao_info = NULL; + rcu_assign_pointer(tcp_twsk(sk)->ao_info, NULL); } else { ao = rcu_dereference_protected(tcp_sk(sk)->ao_info, 1); - tcp_sk(sk)->ao_info = NULL; + rcu_assign_pointer(tcp_sk(sk)->ao_info, NULL); }
if (!ao || !refcount_dec_and_test(&ao->refcnt)) return;
- hlist_for_each_entry_safe(key, n, &ao->head, node) { - hlist_del_rcu(&key->node); - if (!twsk) - atomic_sub(tcp_ao_sizeof_key(key), &sk->sk_omem_alloc); - call_rcu(&key->rcu, tcp_ao_key_free_rcu); - } - - kfree_rcu(ao, rcu); - static_branch_slow_dec_deferred(&tcp_ao_needed); + if (!twsk) + tcp_ao_sk_omem_free(sk, ao); + call_rcu(&ao->rcu, tcp_ao_info_free_rcu); }
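The tcp_ao rewrite changes the teardown ordering: the ao_info pointer is unpublished with rcu_assign_pointer() first, and the keys plus the tcp_ao_info itself are then freed together in a single RCU callback, so a reader racing with destruction can never find freed keys behind a still-visible pointer. Condensed, with assumed names:

    /* Sketch: unpublish, then defer the whole free past every
     * pre-existing RCU reader. */
    static void info_free_rcu(struct rcu_head *head)
    {
            struct my_info *inf = container_of(head, struct my_info, rcu);

            kfree(inf);             /* keys freed here too, all at once */
    }

    static void info_destroy(struct my_info __rcu **slot, struct my_info *inf)
    {
            rcu_assign_pointer(*slot, NULL);        /* unpublish */
            if (refcount_dec_and_test(&inf->refcnt))
                    call_rcu(&inf->rcu, info_free_rcu);
    }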
void tcp_ao_time_wait(struct tcp_timewait_sock *tcptw, struct tcp_sock *tp) diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index 4b791e74529e..e4ad3311e148 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c @@ -140,6 +140,9 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb, if (thlen < sizeof(*th)) goto out;
+ if (unlikely(skb_checksum_start(skb) != skb_transport_header(skb))) + goto out; + if (!pskb_may_pull(skb, thlen)) goto out;
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 59448a2dbf2c..ee9af921556a 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -278,6 +278,10 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, if (gso_skb->len <= sizeof(*uh) + mss) return ERR_PTR(-EINVAL);
+ if (unlikely(skb_checksum_start(gso_skb) != + skb_transport_header(gso_skb))) + return ERR_PTR(-EINVAL); + if (skb_gso_ok(gso_skb, features | NETIF_F_GSO_ROBUST)) { /* Packet is from an untrusted source, reset gso_segs. */ skb_shinfo(gso_skb)->gso_segs = DIV_ROUND_UP(gso_skb->len - sizeof(*uh), diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 88a34db265d8..7ea4adf81d85 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -88,6 +88,11 @@ /* Default trace flags */ #define L2TP_DEFAULT_DEBUG_FLAGS 0
+#define L2TP_DEPTH_NESTING 2 +#if L2TP_DEPTH_NESTING == SINGLE_DEPTH_NESTING +#error "L2TP requires its own lockdep subclass" +#endif + /* Private data stored for received packets in the skb. */ struct l2tp_skb_cb { @@ -1085,7 +1090,13 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, uns IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED); nf_reset_ct(skb);
- bh_lock_sock_nested(sk); + /* L2TP uses its own lockdep subclass to avoid lockdep splats caused by + * nested socket calls on the same lockdep socket class. This can + * happen when data from a user socket is routed over l2tp, which uses + * another userspace socket. + */ + spin_lock_nested(&sk->sk_lock.slock, L2TP_DEPTH_NESTING); + if (sock_owned_by_user(sk)) { kfree_skb(skb); ret = NET_XMIT_DROP; @@ -1137,7 +1148,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, uns ret = l2tp_xmit_queue(tunnel, skb, &inet->cork.fl);
out_unlock: - bh_unlock_sock(sk); + spin_unlock(&sk->sk_lock.slock);
return ret; } diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 21d55dc539f6..677bbbac9f16 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -616,7 +616,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, return -EINVAL;
if (!pubsta->deflink.ht_cap.ht_supported && - sta->sdata->vif.bss_conf.chanreq.oper.chan->band != NL80211_BAND_6GHZ) + !pubsta->deflink.vht_cap.vht_supported && + !pubsta->deflink.he_cap.has_he && + !pubsta->deflink.eht_cap.has_eht) return -EINVAL;
if (WARN_ON_ONCE(!local->ops->ampdu_action)) diff --git a/net/mptcp/options.c b/net/mptcp/options.c index 8a68382a4fe9..ac2f1a54cc43 100644 --- a/net/mptcp/options.c +++ b/net/mptcp/options.c @@ -958,7 +958,8 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
if (subflow->remote_key_valid && (((mp_opt->suboptions & OPTION_MPTCP_DSS) && mp_opt->use_ack) || - ((mp_opt->suboptions & OPTION_MPTCP_ADD_ADDR) && !mp_opt->echo))) { + ((mp_opt->suboptions & OPTION_MPTCP_ADD_ADDR) && + (!mp_opt->echo || subflow->mp_join)))) { /* subflows are fully established as soon as we get any * additional ack, including ADD_ADDR. */ diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index 37954a0b087d..4cae2aa7be5c 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -348,7 +348,7 @@ bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk, add_entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
if (add_entry) { - if (mptcp_pm_is_kernel(msk)) + if (WARN_ON_ONCE(mptcp_pm_is_kernel(msk))) return false;
sk_reset_timer(sk, &add_entry->add_timer, @@ -512,8 +512,8 @@ __lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info)
static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk) { + struct mptcp_pm_addr_entry *local, *signal_and_subflow = NULL; struct sock *sk = (struct sock *)msk; - struct mptcp_pm_addr_entry *local; unsigned int add_addr_signal_max; unsigned int local_addr_max; struct pm_nl_pernet *pernet; @@ -555,8 +555,6 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
/* check first for announce */ if (msk->pm.add_addr_signaled < add_addr_signal_max) { - local = select_signal_address(pernet, msk); - /* due to racing events on both ends we can reach here while * previous add address is still running: if we invoke now * mptcp_pm_announce_addr(), that will fail and the @@ -567,16 +565,26 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk) if (msk->pm.addr_signal & BIT(MPTCP_ADD_ADDR_SIGNAL)) return;
- if (local) { - if (mptcp_pm_alloc_anno_list(msk, &local->addr)) { - __clear_bit(local->addr.id, msk->pm.id_avail_bitmap); - msk->pm.add_addr_signaled++; - mptcp_pm_announce_addr(msk, &local->addr, false); - mptcp_pm_nl_addr_send_ack(msk); - } - } + local = select_signal_address(pernet, msk); + if (!local) + goto subflow; + + /* If the alloc fails, we are on memory pressure, not worth + * continuing, and trying to create subflows. + */ + if (!mptcp_pm_alloc_anno_list(msk, &local->addr)) + return; + + __clear_bit(local->addr.id, msk->pm.id_avail_bitmap); + msk->pm.add_addr_signaled++; + mptcp_pm_announce_addr(msk, &local->addr, false); + mptcp_pm_nl_addr_send_ack(msk); + + if (local->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) + signal_and_subflow = local; }
+subflow: /* check if should create a new subflow */ while (msk->pm.local_addr_used < local_addr_max && msk->pm.subflows < subflows_max) { @@ -584,9 +592,14 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk) bool fullmesh; int i, nr;
- local = select_local_address(pernet, msk); - if (!local) - break; + if (signal_and_subflow) { + local = signal_and_subflow; + signal_and_subflow = NULL; + } else { + local = select_local_address(pernet, msk); + if (!local) + break; + }
fullmesh = !!(local->flags & MPTCP_PM_ADDR_FLAG_FULLMESH);
@@ -1328,8 +1341,8 @@ int mptcp_pm_nl_add_addr_doit(struct sk_buff *skb, struct genl_info *info) if (ret < 0) return ret;
- if (addr.addr.port && !(addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) { - GENL_SET_ERR_MSG(info, "flags must have signal when using port"); + if (addr.addr.port && !address_use_port(&addr)) { + GENL_SET_ERR_MSG(info, "flags must have signal and not subflow when using port"); return -EINVAL; }
diff --git a/net/sctp/input.c b/net/sctp/input.c index 17fcaa9b0df9..a8a254a5008e 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -735,15 +735,19 @@ static int __sctp_hash_endpoint(struct sctp_endpoint *ep) struct sock *sk = ep->base.sk; struct net *net = sock_net(sk); struct sctp_hashbucket *head; + int err = 0;
ep->hashent = sctp_ep_hashfn(net, ep->base.bind_addr.port); head = &sctp_ep_hashtable[ep->hashent];
+ write_lock(&head->lock); if (sk->sk_reuseport) { bool any = sctp_is_ep_boundall(sk); struct sctp_endpoint *ep2; struct list_head *list; - int cnt = 0, err = 1; + int cnt = 0; + + err = 1;
list_for_each(list, &ep->base.bind_addr.address_list) cnt++; @@ -761,24 +765,24 @@ static int __sctp_hash_endpoint(struct sctp_endpoint *ep) if (!err) { err = reuseport_add_sock(sk, sk2, any); if (err) - return err; + goto out; break; } else if (err < 0) { - return err; + goto out; } }
if (err) { err = reuseport_alloc(sk, any); if (err) - return err; + goto out; } }
- write_lock(&head->lock); hlist_add_head(&ep->node, &head->chain); +out: write_unlock(&head->lock); - return 0; + return err; }
/* Add an endpoint to the hash. Local BH-safe. */ @@ -803,10 +807,9 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
head = &sctp_ep_hashtable[ep->hashent];
+ write_lock(&head->lock); if (rcu_access_pointer(sk->sk_reuseport_cb)) reuseport_detach_sock(sk); - - write_lock(&head->lock); hlist_del_init(&ep->node); write_unlock(&head->lock); } diff --git a/net/smc/smc_stats.h b/net/smc/smc_stats.h index 9d32058db2b5..e19177ce4092 100644 --- a/net/smc/smc_stats.h +++ b/net/smc/smc_stats.h @@ -19,7 +19,7 @@
#include "smc_clc.h"
-#define SMC_MAX_FBACK_RSN_CNT 30 +#define SMC_MAX_FBACK_RSN_CNT 36
enum { SMC_BUF_8K, diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 6debf4fd42d4..cef623ea1506 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -369,8 +369,10 @@ static void rpc_make_runnable(struct workqueue_struct *wq, if (RPC_IS_ASYNC(task)) { INIT_WORK(&task->u.tk_work, rpc_async_schedule); queue_work(wq, &task->u.tk_work); - } else + } else { + smp_mb__after_atomic(); wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED); + } }
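The sunrpc hunk inserts smp_mb__after_atomic() before wake_up_bit(): the wakeup helper inspects the waitqueue without taking a lock, so the waker's state change must be ordered before it, or the waiter can recheck the bit, see a stale value, and sleep forever. The generic shape, with a hypothetical flag word:

    /* Waker side of the wait_on_bit()/wake_up_bit() contract. */
    clear_bit(MY_BUSY_BIT, &obj->flags);    /* hypothetical bit */
    smp_mb__after_atomic();                 /* order the clear vs. the wakeup */
    wake_up_bit(&obj->flags, MY_BUSY_BIT);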
/* diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 11cb5badafb6..be5266007b48 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1473,6 +1473,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, struct unix_sock *u = unix_sk(sk), *newu, *otheru; struct net *net = sock_net(sk); struct sk_buff *skb = NULL; + unsigned char state; long timeo; int err;
@@ -1523,7 +1524,6 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, goto out; }
- /* Latch state of peer */ unix_state_lock(other);
/* Apparently VFS overslept socket death. Retry. */ @@ -1553,37 +1553,21 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, goto restart; }
- /* Latch our state. - - It is tricky place. We need to grab our state lock and cannot - drop lock on peer. It is dangerous because deadlock is - possible. Connect to self case and simultaneous - attempt to connect are eliminated by checking socket - state. other is TCP_LISTEN, if sk is TCP_LISTEN we - check this before attempt to grab lock. - - Well, and we have to recheck the state after socket locked. + /* self connect and simultaneous connect are eliminated + * by rejecting TCP_LISTEN socket to avoid deadlock. */ - switch (READ_ONCE(sk->sk_state)) { - case TCP_CLOSE: - /* This is ok... continue with connect */ - break; - case TCP_ESTABLISHED: - /* Socket is already connected */ - err = -EISCONN; - goto out_unlock; - default: - err = -EINVAL; + state = READ_ONCE(sk->sk_state); + if (unlikely(state != TCP_CLOSE)) { + err = state == TCP_ESTABLISHED ? -EISCONN : -EINVAL; goto out_unlock; }
unix_state_lock_nested(sk, U_LOCK_SECOND);
- if (sk->sk_state != TCP_CLOSE) { + if (unlikely(sk->sk_state != TCP_CLOSE)) { + err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EINVAL; unix_state_unlock(sk); - unix_state_unlock(other); - sock_put(other); - goto restart; + goto out_unlock; }
err = security_unix_stream_connect(sk, other, newsk); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 0fd075238fc7..c2829d673bc7 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -3422,6 +3422,33 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev, if (chandef.chan != cur_chan) return -EBUSY;
+ /* only allow this for regular channel widths */ + switch (wdev->links[link_id].ap.chandef.width) { + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_20: + case NL80211_CHAN_WIDTH_40: + case NL80211_CHAN_WIDTH_80: + case NL80211_CHAN_WIDTH_80P80: + case NL80211_CHAN_WIDTH_160: + case NL80211_CHAN_WIDTH_320: + break; + default: + return -EINVAL; + } + + switch (chandef.width) { + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_20: + case NL80211_CHAN_WIDTH_40: + case NL80211_CHAN_WIDTH_80: + case NL80211_CHAN_WIDTH_80P80: + case NL80211_CHAN_WIDTH_160: + case NL80211_CHAN_WIDTH_320: + break; + default: + return -EINVAL; + } + result = rdev_set_ap_chanwidth(rdev, dev, link_id, &chandef); if (result) @@ -4458,10 +4485,7 @@ static void get_key_callback(void *c, struct key_params *params) struct nlattr *key; struct get_key_cookie *cookie = c;
- if ((params->key && - nla_put(cookie->msg, NL80211_ATTR_KEY_DATA, - params->key_len, params->key)) || - (params->seq && + if ((params->seq && nla_put(cookie->msg, NL80211_ATTR_KEY_SEQ, params->seq_len, params->seq)) || (params->cipher && @@ -4473,10 +4497,7 @@ static void get_key_callback(void *c, struct key_params *params) if (!key) goto nla_put_failure;
- if ((params->key && - nla_put(cookie->msg, NL80211_KEY_DATA, - params->key_len, params->key)) || - (params->seq && + if ((params->seq && nla_put(cookie->msg, NL80211_KEY_SEQ, params->seq_len, params->seq)) || (params->cipher && diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 707d203ba652..78042ac2b71f 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -1989,6 +1989,8 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid) }
static const struct snd_pci_quirk force_connect_list[] = { + SND_PCI_QUIRK(0x103c, 0x83e2, "HP EliteDesk 800 G4", 1), + SND_PCI_QUIRK(0x103c, 0x83ef, "HP MP9 G4 Retail System AMS", 1), SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1), SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1), SND_PCI_QUIRK(0x103c, 0x8711, "HP", 1), diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index a6c1e2199e70..3840565ef8b0 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -10671,6 +10671,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x8086, 0x3038, "Intel NUC 13", ALC295_FIXUP_CHROME_BOOK), SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0xf111, 0x0006, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0xf111, 0x0009, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
#if 0 /* Below is a quirk table taken from the old code. diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c index 36dddf230c2c..d597e59863ee 100644 --- a/sound/soc/amd/yc/acp6x-mach.c +++ b/sound/soc/amd/yc/acp6x-mach.c @@ -409,6 +409,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { DMI_MATCH(DMI_BOARD_NAME, "8A43"), } }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "HP"), + DMI_MATCH(DMI_BOARD_NAME, "8A44"), + } + }, { .driver_data = &acp6x_card, .matches = { diff --git a/sound/soc/codecs/cs-amp-lib.c b/sound/soc/codecs/cs-amp-lib.c index 287ac01a3873..605964af8afa 100644 --- a/sound/soc/codecs/cs-amp-lib.c +++ b/sound/soc/codecs/cs-amp-lib.c @@ -108,7 +108,7 @@ static efi_status_t cs_amp_get_efi_variable(efi_char16_t *name,
KUNIT_STATIC_STUB_REDIRECT(cs_amp_get_efi_variable, name, guid, size, buf);
- if (IS_ENABLED(CONFIG_EFI)) + if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE)) return efi.get_variable(name, guid, &attr, size, buf);
return EFI_NOT_FOUND; diff --git a/sound/soc/codecs/cs35l56-sdw.c b/sound/soc/codecs/cs35l56-sdw.c index 70ff55c1517f..29a5476af95a 100644 --- a/sound/soc/codecs/cs35l56-sdw.c +++ b/sound/soc/codecs/cs35l56-sdw.c @@ -23,6 +23,79 @@ /* Register addresses are offset when sent over SoundWire */ #define CS35L56_SDW_ADDR_OFFSET 0x8000
+/* Cirrus bus bridge registers */ +#define CS35L56_SDW_MEM_ACCESS_STATUS 0xd0 +#define CS35L56_SDW_MEM_READ_DATA 0xd8 + +#define CS35L56_SDW_LAST_LATE BIT(3) +#define CS35L56_SDW_CMD_IN_PROGRESS BIT(2) +#define CS35L56_SDW_RDATA_RDY BIT(0) + +#define CS35L56_LATE_READ_POLL_US 10 +#define CS35L56_LATE_READ_TIMEOUT_US 1000 + +static int cs35l56_sdw_poll_mem_status(struct sdw_slave *peripheral, + unsigned int mask, + unsigned int match) +{ + int ret, val; + + ret = read_poll_timeout(sdw_read_no_pm, val, + (val < 0) || ((val & mask) == match), + CS35L56_LATE_READ_POLL_US, CS35L56_LATE_READ_TIMEOUT_US, + false, peripheral, CS35L56_SDW_MEM_ACCESS_STATUS); + if (ret < 0) + return ret; + + if (val < 0) + return val; + + return 0; +} + +static int cs35l56_sdw_slow_read(struct sdw_slave *peripheral, unsigned int reg, + u8 *buf, size_t val_size) +{ + int ret, i; + + reg += CS35L56_SDW_ADDR_OFFSET; + + for (i = 0; i < val_size; i += sizeof(u32)) { + /* Poll for bus bridge idle */ + ret = cs35l56_sdw_poll_mem_status(peripheral, + CS35L56_SDW_CMD_IN_PROGRESS, + 0); + if (ret < 0) { + dev_err(&peripheral->dev, "!CMD_IN_PROGRESS fail: %d\n", ret); + return ret; + } + + /* Reading LSByte triggers read of register to holding buffer */ + sdw_read_no_pm(peripheral, reg + i); + + /* Wait for data available */ + ret = cs35l56_sdw_poll_mem_status(peripheral, + CS35L56_SDW_RDATA_RDY, + CS35L56_SDW_RDATA_RDY); + if (ret < 0) { + dev_err(&peripheral->dev, "RDATA_RDY fail: %d\n", ret); + return ret; + } + + /* Read data from buffer */ + ret = sdw_nread_no_pm(peripheral, CS35L56_SDW_MEM_READ_DATA, + sizeof(u32), &buf[i]); + if (ret) { + dev_err(&peripheral->dev, "Late read @%#x failed: %d\n", reg + i, ret); + return ret; + } + + swab32s((u32 *)&buf[i]); + } + + return 0; +} + static int cs35l56_sdw_read_one(struct sdw_slave *peripheral, unsigned int reg, void *buf) { int ret; @@ -48,6 +121,10 @@ static int cs35l56_sdw_read(void *context, const void *reg_buf, int ret;
reg = le32_to_cpu(*(const __le32 *)reg_buf); + + if (cs35l56_is_otp_register(reg)) + return cs35l56_sdw_slow_read(peripheral, reg, buf8, val_size); + reg += CS35L56_SDW_ADDR_OFFSET;
if (val_size == 4) diff --git a/sound/soc/codecs/cs35l56-shared.c b/sound/soc/codecs/cs35l56-shared.c index f609cade805d..6d821a793045 100644 --- a/sound/soc/codecs/cs35l56-shared.c +++ b/sound/soc/codecs/cs35l56-shared.c @@ -20,6 +20,18 @@ static const struct reg_sequence cs35l56_patch[] = { * Firmware can change these to non-defaults to satisfy SDCA. * Ensure that they are at known defaults. */ + { CS35L56_ASP1_ENABLES1, 0x00000000 }, + { CS35L56_ASP1_CONTROL1, 0x00000028 }, + { CS35L56_ASP1_CONTROL2, 0x18180200 }, + { CS35L56_ASP1_CONTROL3, 0x00000002 }, + { CS35L56_ASP1_FRAME_CONTROL1, 0x03020100 }, + { CS35L56_ASP1_FRAME_CONTROL5, 0x00020100 }, + { CS35L56_ASP1_DATA_CONTROL1, 0x00000018 }, + { CS35L56_ASP1_DATA_CONTROL5, 0x00000018 }, + { CS35L56_ASP1TX1_INPUT, 0x00000000 }, + { CS35L56_ASP1TX2_INPUT, 0x00000000 }, + { CS35L56_ASP1TX3_INPUT, 0x00000000 }, + { CS35L56_ASP1TX4_INPUT, 0x00000000 }, { CS35L56_SWIRE_DP3_CH1_INPUT, 0x00000018 }, { CS35L56_SWIRE_DP3_CH2_INPUT, 0x00000019 }, { CS35L56_SWIRE_DP3_CH3_INPUT, 0x00000029 }, @@ -41,12 +53,18 @@ EXPORT_SYMBOL_NS_GPL(cs35l56_set_patch, SND_SOC_CS35L56_SHARED); static const struct reg_default cs35l56_reg_defaults[] = { /* no defaults for OTP_MEM - first read populates cache */
- /* - * No defaults for ASP1 control or ASP1TX mixer. See - * cs35l56_populate_asp1_register_defaults() and - * cs35l56_sync_asp1_mixer_widgets_with_firmware(). - */ - + { CS35L56_ASP1_ENABLES1, 0x00000000 }, + { CS35L56_ASP1_CONTROL1, 0x00000028 }, + { CS35L56_ASP1_CONTROL2, 0x18180200 }, + { CS35L56_ASP1_CONTROL3, 0x00000002 }, + { CS35L56_ASP1_FRAME_CONTROL1, 0x03020100 }, + { CS35L56_ASP1_FRAME_CONTROL5, 0x00020100 }, + { CS35L56_ASP1_DATA_CONTROL1, 0x00000018 }, + { CS35L56_ASP1_DATA_CONTROL5, 0x00000018 }, + { CS35L56_ASP1TX1_INPUT, 0x00000000 }, + { CS35L56_ASP1TX2_INPUT, 0x00000000 }, + { CS35L56_ASP1TX3_INPUT, 0x00000000 }, + { CS35L56_ASP1TX4_INPUT, 0x00000000 }, { CS35L56_SWIRE_DP3_CH1_INPUT, 0x00000018 }, { CS35L56_SWIRE_DP3_CH2_INPUT, 0x00000019 }, { CS35L56_SWIRE_DP3_CH3_INPUT, 0x00000029 }, @@ -206,77 +224,6 @@ static bool cs35l56_volatile_reg(struct device *dev, unsigned int reg) } }
-static const struct reg_sequence cs35l56_asp1_defaults[] = { - REG_SEQ0(CS35L56_ASP1_ENABLES1, 0x00000000), - REG_SEQ0(CS35L56_ASP1_CONTROL1, 0x00000028), - REG_SEQ0(CS35L56_ASP1_CONTROL2, 0x18180200), - REG_SEQ0(CS35L56_ASP1_CONTROL3, 0x00000002), - REG_SEQ0(CS35L56_ASP1_FRAME_CONTROL1, 0x03020100), - REG_SEQ0(CS35L56_ASP1_FRAME_CONTROL5, 0x00020100), - REG_SEQ0(CS35L56_ASP1_DATA_CONTROL1, 0x00000018), - REG_SEQ0(CS35L56_ASP1_DATA_CONTROL5, 0x00000018), - REG_SEQ0(CS35L56_ASP1TX1_INPUT, 0x00000000), - REG_SEQ0(CS35L56_ASP1TX2_INPUT, 0x00000000), - REG_SEQ0(CS35L56_ASP1TX3_INPUT, 0x00000000), - REG_SEQ0(CS35L56_ASP1TX4_INPUT, 0x00000000), -}; - -/* - * The firmware can have control of the ASP so we don't provide regmap - * with defaults for these registers, to prevent a regcache_sync() from - * overwriting the firmware settings. But if the machine driver hooks up - * the ASP it means the driver is taking control of the ASP, so then the - * registers are populated with the defaults. - */ -int cs35l56_init_asp1_regs_for_driver_control(struct cs35l56_base *cs35l56_base) -{ - if (!cs35l56_base->fw_owns_asp1) - return 0; - - cs35l56_base->fw_owns_asp1 = false; - - return regmap_multi_reg_write(cs35l56_base->regmap, cs35l56_asp1_defaults, - ARRAY_SIZE(cs35l56_asp1_defaults)); -} -EXPORT_SYMBOL_NS_GPL(cs35l56_init_asp1_regs_for_driver_control, SND_SOC_CS35L56_SHARED); - -/* - * The firmware boot sequence can overwrite the ASP1 config registers so that - * they don't match regmap's view of their values. Rewrite the values from the - * regmap cache into the hardware registers. - */ -int cs35l56_force_sync_asp1_registers_from_cache(struct cs35l56_base *cs35l56_base) -{ - struct reg_sequence asp1_regs[ARRAY_SIZE(cs35l56_asp1_defaults)]; - int i, ret; - - if (cs35l56_base->fw_owns_asp1) - return 0; - - memcpy(asp1_regs, cs35l56_asp1_defaults, sizeof(asp1_regs)); - - /* Read current values from regmap cache into the write sequence */ - for (i = 0; i < ARRAY_SIZE(asp1_regs); ++i) { - ret = regmap_read(cs35l56_base->regmap, asp1_regs[i].reg, &asp1_regs[i].def); - if (ret) - goto err; - } - - /* Write the values cache-bypassed so that they will be written to silicon */ - ret = regmap_multi_reg_write_bypassed(cs35l56_base->regmap, asp1_regs, - ARRAY_SIZE(asp1_regs)); - if (ret) - goto err; - - return 0; - -err: - dev_err(cs35l56_base->dev, "Failed to sync ASP1 registers: %d\n", ret); - - return ret; -} -EXPORT_SYMBOL_NS_GPL(cs35l56_force_sync_asp1_registers_from_cache, SND_SOC_CS35L56_SHARED); - int cs35l56_mbox_send(struct cs35l56_base *cs35l56_base, unsigned int command) { unsigned int val; diff --git a/sound/soc/codecs/cs35l56.c b/sound/soc/codecs/cs35l56.c index 7f2f2f8c13fa..84c34f5b1a51 100644 --- a/sound/soc/codecs/cs35l56.c +++ b/sound/soc/codecs/cs35l56.c @@ -63,131 +63,6 @@ static int cs35l56_dspwait_put_volsw(struct snd_kcontrol *kcontrol, return snd_soc_put_volsw(kcontrol, ucontrol); }
-static const unsigned short cs35l56_asp1_mixer_regs[] = { - CS35L56_ASP1TX1_INPUT, CS35L56_ASP1TX2_INPUT, - CS35L56_ASP1TX3_INPUT, CS35L56_ASP1TX4_INPUT, -}; - -static const char * const cs35l56_asp1_mux_control_names[] = { - "ASP1 TX1 Source", "ASP1 TX2 Source", "ASP1 TX3 Source", "ASP1 TX4 Source" -}; - -static int cs35l56_sync_asp1_mixer_widgets_with_firmware(struct cs35l56_private *cs35l56) -{ - struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(cs35l56->component); - const char *prefix = cs35l56->component->name_prefix; - char full_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; - const char *name; - struct snd_kcontrol *kcontrol; - struct soc_enum *e; - unsigned int val[4]; - int i, item, ret; - - if (cs35l56->asp1_mixer_widgets_initialized) - return 0; - - /* - * Resume so we can read the registers from silicon if the regmap - * cache has not yet been populated. - */ - ret = pm_runtime_resume_and_get(cs35l56->base.dev); - if (ret < 0) - return ret; - - /* Wait for firmware download and reboot */ - cs35l56_wait_dsp_ready(cs35l56); - - ret = regmap_bulk_read(cs35l56->base.regmap, CS35L56_ASP1TX1_INPUT, - val, ARRAY_SIZE(val)); - - pm_runtime_mark_last_busy(cs35l56->base.dev); - pm_runtime_put_autosuspend(cs35l56->base.dev); - - if (ret) { - dev_err(cs35l56->base.dev, "Failed to read ASP1 mixer regs: %d\n", ret); - return ret; - } - - for (i = 0; i < ARRAY_SIZE(cs35l56_asp1_mux_control_names); ++i) { - name = cs35l56_asp1_mux_control_names[i]; - - if (prefix) { - snprintf(full_name, sizeof(full_name), "%s %s", prefix, name); - name = full_name; - } - - kcontrol = snd_soc_card_get_kcontrol_locked(dapm->card, name); - if (!kcontrol) { - dev_warn(cs35l56->base.dev, "Could not find control %s\n", name); - continue; - } - - e = (struct soc_enum *)kcontrol->private_value; - item = snd_soc_enum_val_to_item(e, val[i] & CS35L56_ASP_TXn_SRC_MASK); - snd_soc_dapm_mux_update_power(dapm, kcontrol, item, e, NULL); - } - - cs35l56->asp1_mixer_widgets_initialized = true; - - return 0; -} - -static int cs35l56_dspwait_asp1tx_get(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_component *component = snd_soc_dapm_kcontrol_component(kcontrol); - struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(component); - struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; - int index = e->shift_l; - unsigned int addr, val; - int ret; - - ret = cs35l56_sync_asp1_mixer_widgets_with_firmware(cs35l56); - if (ret) - return ret; - - addr = cs35l56_asp1_mixer_regs[index]; - ret = regmap_read(cs35l56->base.regmap, addr, &val); - if (ret) - return ret; - - val &= CS35L56_ASP_TXn_SRC_MASK; - ucontrol->value.enumerated.item[0] = snd_soc_enum_val_to_item(e, val); - - return 0; -} - -static int cs35l56_dspwait_asp1tx_put(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_component *component = snd_soc_dapm_kcontrol_component(kcontrol); - struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol); - struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(component); - struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; - int item = ucontrol->value.enumerated.item[0]; - int index = e->shift_l; - unsigned int addr, val; - bool changed; - int ret; - - ret = cs35l56_sync_asp1_mixer_widgets_with_firmware(cs35l56); - if (ret) - return ret; - - addr = cs35l56_asp1_mixer_regs[index]; - val = snd_soc_enum_item_to_val(e, item); - - ret = regmap_update_bits_check(cs35l56->base.regmap, addr, - 
CS35L56_ASP_TXn_SRC_MASK, val, &changed); - if (ret) - return ret; - - if (changed) - snd_soc_dapm_mux_update_power(dapm, kcontrol, item, e, NULL); - - return changed; -} - static DECLARE_TLV_DB_SCALE(vol_tlv, -10000, 25, 0);
static const struct snd_kcontrol_new cs35l56_controls[] = { @@ -210,44 +85,40 @@ static const struct snd_kcontrol_new cs35l56_controls[] = { };
static SOC_VALUE_ENUM_SINGLE_DECL(cs35l56_asp1tx1_enum, - SND_SOC_NOPM, - 0, 0, + CS35L56_ASP1TX1_INPUT, + 0, CS35L56_ASP_TXn_SRC_MASK, cs35l56_tx_input_texts, cs35l56_tx_input_values);
static const struct snd_kcontrol_new asp1_tx1_mux = - SOC_DAPM_ENUM_EXT("ASP1TX1 SRC", cs35l56_asp1tx1_enum, - cs35l56_dspwait_asp1tx_get, cs35l56_dspwait_asp1tx_put); + SOC_DAPM_ENUM("ASP1TX1 SRC", cs35l56_asp1tx1_enum);
static SOC_VALUE_ENUM_SINGLE_DECL(cs35l56_asp1tx2_enum, - SND_SOC_NOPM, - 1, 0, + CS35L56_ASP1TX2_INPUT, + 0, CS35L56_ASP_TXn_SRC_MASK, cs35l56_tx_input_texts, cs35l56_tx_input_values);
static const struct snd_kcontrol_new asp1_tx2_mux = - SOC_DAPM_ENUM_EXT("ASP1TX2 SRC", cs35l56_asp1tx2_enum, - cs35l56_dspwait_asp1tx_get, cs35l56_dspwait_asp1tx_put); + SOC_DAPM_ENUM("ASP1TX2 SRC", cs35l56_asp1tx2_enum);
static SOC_VALUE_ENUM_SINGLE_DECL(cs35l56_asp1tx3_enum, - SND_SOC_NOPM, - 2, 0, + CS35L56_ASP1TX3_INPUT, + 0, CS35L56_ASP_TXn_SRC_MASK, cs35l56_tx_input_texts, cs35l56_tx_input_values);
static const struct snd_kcontrol_new asp1_tx3_mux = - SOC_DAPM_ENUM_EXT("ASP1TX3 SRC", cs35l56_asp1tx3_enum, - cs35l56_dspwait_asp1tx_get, cs35l56_dspwait_asp1tx_put); + SOC_DAPM_ENUM("ASP1TX3 SRC", cs35l56_asp1tx3_enum);
static SOC_VALUE_ENUM_SINGLE_DECL(cs35l56_asp1tx4_enum, - SND_SOC_NOPM, - 3, 0, + CS35L56_ASP1TX4_INPUT, + 0, CS35L56_ASP_TXn_SRC_MASK, cs35l56_tx_input_texts, cs35l56_tx_input_values);
static const struct snd_kcontrol_new asp1_tx4_mux = - SOC_DAPM_ENUM_EXT("ASP1TX4 SRC", cs35l56_asp1tx4_enum, - cs35l56_dspwait_asp1tx_get, cs35l56_dspwait_asp1tx_put); + SOC_DAPM_ENUM("ASP1TX4 SRC", cs35l56_asp1tx4_enum);
static SOC_VALUE_ENUM_SINGLE_DECL(cs35l56_sdw1tx1_enum, CS35L56_SWIRE_DP3_CH1_INPUT, @@ -285,21 +156,6 @@ static SOC_VALUE_ENUM_SINGLE_DECL(cs35l56_sdw1tx4_enum, static const struct snd_kcontrol_new sdw1_tx4_mux = SOC_DAPM_ENUM("SDW1TX4 SRC", cs35l56_sdw1tx4_enum);
-static int cs35l56_asp1_cfg_event(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); - struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(component); - - switch (event) { - case SND_SOC_DAPM_PRE_PMU: - /* Override register values set by firmware boot */ - return cs35l56_force_sync_asp1_registers_from_cache(&cs35l56->base); - default: - return 0; - } -} - static int cs35l56_play_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { @@ -336,9 +192,6 @@ static const struct snd_soc_dapm_widget cs35l56_dapm_widgets[] = { SND_SOC_DAPM_REGULATOR_SUPPLY("VDD_B", 0, 0), SND_SOC_DAPM_REGULATOR_SUPPLY("VDD_AMP", 0, 0),
- SND_SOC_DAPM_SUPPLY("ASP1 CFG", SND_SOC_NOPM, 0, 0, cs35l56_asp1_cfg_event, - SND_SOC_DAPM_PRE_PMU), - SND_SOC_DAPM_SUPPLY("PLAY", SND_SOC_NOPM, 0, 0, cs35l56_play_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
@@ -406,9 +259,6 @@ static const struct snd_soc_dapm_route cs35l56_audio_map[] = { { "AMP", NULL, "VDD_B" }, { "AMP", NULL, "VDD_AMP" },
- { "ASP1 Playback", NULL, "ASP1 CFG" }, - { "ASP1 Capture", NULL, "ASP1 CFG" }, - { "ASP1 Playback", NULL, "PLAY" }, { "SDW1 Playback", NULL, "PLAY" },
@@ -459,14 +309,9 @@ static int cs35l56_asp_dai_set_fmt(struct snd_soc_dai *codec_dai, unsigned int f { struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(codec_dai->component); unsigned int val; - int ret;
dev_dbg(cs35l56->base.dev, "%s: %#x\n", __func__, fmt);
- ret = cs35l56_init_asp1_regs_for_driver_control(&cs35l56->base); - if (ret) - return ret; - switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) { case SND_SOC_DAIFMT_CBC_CFC: break; @@ -540,11 +385,6 @@ static int cs35l56_asp_dai_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx unsigned int rx_mask, int slots, int slot_width) { struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(dai->component); - int ret; - - ret = cs35l56_init_asp1_regs_for_driver_control(&cs35l56->base); - if (ret) - return ret;
if ((slots == 0) || (slot_width == 0)) { dev_dbg(cs35l56->base.dev, "tdm config cleared\n"); @@ -593,11 +433,6 @@ static int cs35l56_asp_dai_hw_params(struct snd_pcm_substream *substream, struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(dai->component); unsigned int rate = params_rate(params); u8 asp_width, asp_wl; - int ret; - - ret = cs35l56_init_asp1_regs_for_driver_control(&cs35l56->base); - if (ret) - return ret;
asp_wl = params_width(params); if (cs35l56->asp_slot_width) @@ -654,11 +489,7 @@ static int cs35l56_asp_dai_set_sysclk(struct snd_soc_dai *dai, int clk_id, unsigned int freq, int dir) { struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(dai->component); - int freq_id, ret; - - ret = cs35l56_init_asp1_regs_for_driver_control(&cs35l56->base); - if (ret) - return ret; + int freq_id;
if (freq == 0) { cs35l56->sysclk_set = false; @@ -1039,13 +870,6 @@ static int cs35l56_component_probe(struct snd_soc_component *component) debugfs_create_bool("can_hibernate", 0444, debugfs_root, &cs35l56->base.can_hibernate); debugfs_create_bool("fw_patched", 0444, debugfs_root, &cs35l56->base.fw_patched);
- /* - * The widgets for the ASP1TX mixer can't be initialized - * until the firmware has been downloaded and rebooted. - */ - regcache_drop_region(cs35l56->base.regmap, CS35L56_ASP1TX1_INPUT, CS35L56_ASP1TX4_INPUT); - cs35l56->asp1_mixer_widgets_initialized = false; - queue_work(cs35l56->dsp_wq, &cs35l56->dsp_work);
return 0; @@ -1436,9 +1260,6 @@ int cs35l56_common_probe(struct cs35l56_private *cs35l56) cs35l56->base.cal_index = -1; cs35l56->speaker_id = -ENOENT;
- /* Assume that the firmware owns ASP1 until we know different */ - cs35l56->base.fw_owns_asp1 = true; - dev_set_drvdata(cs35l56->base.dev, cs35l56);
cs35l56_fill_supply_names(cs35l56->supplies); diff --git a/sound/soc/codecs/cs35l56.h b/sound/soc/codecs/cs35l56.h index b000e7365e40..200f695efca3 100644 --- a/sound/soc/codecs/cs35l56.h +++ b/sound/soc/codecs/cs35l56.h @@ -51,7 +51,6 @@ struct cs35l56_private { u8 asp_slot_count; bool tdm_mode; bool sysclk_set; - bool asp1_mixer_widgets_initialized; u8 old_sdw_clock_scale; };
diff --git a/sound/soc/codecs/wcd938x-sdw.c b/sound/soc/codecs/wcd938x-sdw.c index a1f04010da95..132c1d24f8f6 100644 --- a/sound/soc/codecs/wcd938x-sdw.c +++ b/sound/soc/codecs/wcd938x-sdw.c @@ -1252,12 +1252,12 @@ static int wcd9380_probe(struct sdw_slave *pdev, pdev->prop.lane_control_support = true; pdev->prop.simple_clk_stop_capable = true; if (wcd->is_tx) { - pdev->prop.source_ports = GENMASK(WCD938X_MAX_SWR_PORTS, 0); + pdev->prop.source_ports = GENMASK(WCD938X_MAX_SWR_PORTS - 1, 0); pdev->prop.src_dpn_prop = wcd938x_dpn_prop; wcd->ch_info = &wcd938x_sdw_tx_ch_info[0]; pdev->prop.wake_capable = true; } else { - pdev->prop.sink_ports = GENMASK(WCD938X_MAX_SWR_PORTS, 0); + pdev->prop.sink_ports = GENMASK(WCD938X_MAX_SWR_PORTS - 1, 0); pdev->prop.sink_dpn_prop = wcd938x_dpn_prop; wcd->ch_info = &wcd938x_sdw_rx_ch_info[0]; } diff --git a/sound/soc/codecs/wcd939x-sdw.c b/sound/soc/codecs/wcd939x-sdw.c index 8acb5651c5bc..392f4dcab3e0 100644 --- a/sound/soc/codecs/wcd939x-sdw.c +++ b/sound/soc/codecs/wcd939x-sdw.c @@ -1453,12 +1453,12 @@ static int wcd9390_probe(struct sdw_slave *pdev, const struct sdw_device_id *id) pdev->prop.lane_control_support = true; pdev->prop.simple_clk_stop_capable = true; if (wcd->is_tx) { - pdev->prop.source_ports = GENMASK(WCD939X_MAX_TX_SWR_PORTS, 0); + pdev->prop.source_ports = GENMASK(WCD939X_MAX_TX_SWR_PORTS - 1, 0); pdev->prop.src_dpn_prop = wcd939x_tx_dpn_prop; wcd->ch_info = &wcd939x_sdw_tx_ch_info[0]; pdev->prop.wake_capable = true; } else { - pdev->prop.sink_ports = GENMASK(WCD939X_MAX_RX_SWR_PORTS, 0); + pdev->prop.sink_ports = GENMASK(WCD939X_MAX_RX_SWR_PORTS - 1, 0); pdev->prop.sink_dpn_prop = wcd939x_rx_dpn_prop; wcd->ch_info = &wcd939x_sdw_rx_ch_info[0]; } diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c index 1253695bebd8..53b828f68102 100644 --- a/sound/soc/codecs/wsa881x.c +++ b/sound/soc/codecs/wsa881x.c @@ -1152,7 +1152,7 @@ static int wsa881x_probe(struct sdw_slave *pdev, wsa881x->sconfig.frame_rate = 48000; wsa881x->sconfig.direction = SDW_DATA_DIR_RX; wsa881x->sconfig.type = SDW_STREAM_PDM; - pdev->prop.sink_ports = GENMASK(WSA881X_MAX_SWR_PORTS, 0); + pdev->prop.sink_ports = GENMASK(WSA881X_MAX_SWR_PORTS - 1, 0); pdev->prop.sink_dpn_prop = wsa_sink_dpn_prop; pdev->prop.scp_int1_mask = SDW_SCP_INT1_BUS_CLASH | SDW_SCP_INT1_PARITY; pdev->prop.clk_stop_mode1 = true; diff --git a/sound/soc/codecs/wsa883x.c b/sound/soc/codecs/wsa883x.c index a2e86ef7d18f..2169d9398984 100644 --- a/sound/soc/codecs/wsa883x.c +++ b/sound/soc/codecs/wsa883x.c @@ -1399,7 +1399,15 @@ static int wsa883x_probe(struct sdw_slave *pdev, wsa883x->sconfig.direction = SDW_DATA_DIR_RX; wsa883x->sconfig.type = SDW_STREAM_PDM;
- pdev->prop.sink_ports = GENMASK(WSA883X_MAX_SWR_PORTS, 0); + /* + * Port map index starts at 0, but the data ports for this codec + * start at index 1 + */ + if (of_property_read_u32_array(dev->of_node, "qcom,port-mapping", &pdev->m_port_map[1], + WSA883X_MAX_SWR_PORTS)) + dev_dbg(dev, "Static Port mapping not specified\n"); + + pdev->prop.sink_ports = GENMASK(WSA883X_MAX_SWR_PORTS - 1, 0); + pdev->prop.simple_clk_stop_capable = true; + pdev->prop.sink_dpn_prop = wsa_sink_dpn_prop; + pdev->prop.scp_int1_mask = SDW_SCP_INT1_BUS_CLASH | SDW_SCP_INT1_PARITY; diff --git a/sound/soc/codecs/wsa884x.c index a9767ef0e39d..de4caf61eef9 100644 --- a/sound/soc/codecs/wsa884x.c +++ b/sound/soc/codecs/wsa884x.c @@ -1887,7 +1887,15 @@ static int wsa884x_probe(struct sdw_slave *pdev, wsa884x->sconfig.direction = SDW_DATA_DIR_RX; wsa884x->sconfig.type = SDW_STREAM_PDM;
- pdev->prop.sink_ports = GENMASK(WSA884X_MAX_SWR_PORTS, 0); + /* + * Port map index starts at 0, but the data ports for this codec + * start at index 1 + */ + if (of_property_read_u32_array(dev->of_node, "qcom,port-mapping", &pdev->m_port_map[1], + WSA884X_MAX_SWR_PORTS)) + dev_dbg(dev, "Static Port mapping not specified\n"); + + pdev->prop.sink_ports = GENMASK(WSA884X_MAX_SWR_PORTS - 1, 0); + pdev->prop.simple_clk_stop_capable = true; + pdev->prop.sink_dpn_prop = wsa884x_sink_dpn_prop; + pdev->prop.scp_int1_mask = SDW_SCP_INT1_BUS_CLASH | SDW_SCP_INT1_PARITY; diff --git a/sound/soc/meson/axg-fifo.c index 59abe0b3c59f..486c56a84552 100644 --- a/sound/soc/meson/axg-fifo.c +++ b/sound/soc/meson/axg-fifo.c @@ -207,25 +207,18 @@ static irqreturn_t axg_fifo_pcm_irq_block(int irq, void *dev_id) status = FIELD_GET(STATUS1_INT_STS, status); axg_fifo_ack_irq(fifo, status);
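All of the wcd938x/wcd939x/wsa88xx hunks fix the same off-by-one: GENMASK(h, l) includes both bounds, so a bitmap for N ports is GENMASK(N - 1, 0), while GENMASK(N, 0) sets N + 1 bits and advertises a port that does not exist. Concretely:

    #include <linux/bits.h>

    #define MAX_SWR_PORTS   8

    /* GENMASK(7, 0) == 0x0ff: exactly ports 0..7.
     * GENMASK(8, 0) == 0x1ff: a ninth, nonexistent port. */
    u32 sink_ports = GENMASK(MAX_SWR_PORTS - 1, 0);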
- /* Use the thread to call period elapsed on nonatomic links */ - if (status & FIFO_INT_COUNT_REPEAT) - return IRQ_WAKE_THREAD; + if (status & ~FIFO_INT_COUNT_REPEAT) + dev_dbg(axg_fifo_dev(ss), "unexpected irq - STS 0x%02x\n", + status);
- dev_dbg(axg_fifo_dev(ss), "unexpected irq - STS 0x%02x\n", - status); + if (status & FIFO_INT_COUNT_REPEAT) { + snd_pcm_period_elapsed(ss); + return IRQ_HANDLED; + }
return IRQ_NONE; }
-static irqreturn_t axg_fifo_pcm_irq_block_thread(int irq, void *dev_id) -{ - struct snd_pcm_substream *ss = dev_id; - - snd_pcm_period_elapsed(ss); - - return IRQ_HANDLED; -} - int axg_fifo_pcm_open(struct snd_soc_component *component, struct snd_pcm_substream *ss) { @@ -251,8 +244,9 @@ int axg_fifo_pcm_open(struct snd_soc_component *component, if (ret) return ret;
- ret = request_threaded_irq(fifo->irq, axg_fifo_pcm_irq_block, - axg_fifo_pcm_irq_block_thread, + /* Use the threaded irq handler only with non-atomic links */ + ret = request_threaded_irq(fifo->irq, NULL, + axg_fifo_pcm_irq_block, IRQF_ONESHOT, dev_name(dev), ss); if (ret) return ret; diff --git a/sound/soc/sof/mediatek/mt8195/mt8195.c b/sound/soc/sof/mediatek/mt8195/mt8195.c index 31dc98d1b1d8..8d3fc167cd81 100644 --- a/sound/soc/sof/mediatek/mt8195/mt8195.c +++ b/sound/soc/sof/mediatek/mt8195/mt8195.c @@ -573,7 +573,7 @@ static const struct snd_sof_dsp_ops sof_mt8195_ops = { static struct snd_sof_of_mach sof_mt8195_machs[] = { { .compatible = "google,tomato", - .sof_tplg_filename = "sof-mt8195-mt6359-rt1019-rt5682-dts.tplg" + .sof_tplg_filename = "sof-mt8195-mt6359-rt1019-rt5682.tplg" }, { .compatible = "mediatek,mt8195", .sof_tplg_filename = "sof-mt8195.tplg" diff --git a/sound/soc/sti/sti_uniperif.c b/sound/soc/sti/sti_uniperif.c index ba824f14a39c..a7956e5a4ee5 100644 --- a/sound/soc/sti/sti_uniperif.c +++ b/sound/soc/sti/sti_uniperif.c @@ -352,7 +352,7 @@ static int sti_uniperiph_resume(struct snd_soc_component *component) return ret; }
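In the axg-fifo rework above, the primary handler is dropped entirely: passing NULL to request_threaded_irq() makes the core install its default hard-irq handler and run the driver's function in thread context, and IRQF_ONESHOT (required with a NULL primary handler) keeps the interrupt masked until the thread returns. Minimal form, with assumed names:

    /* Thread-only interrupt handling (sketch). */
    ret = request_threaded_irq(irq, NULL /* default primary handler */,
                               my_thread_fn, IRQF_ONESHOT,
                               dev_name(dev), ctx);
    if (ret)
            return ret;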
-static int sti_uniperiph_dai_probe(struct snd_soc_dai *dai) +int sti_uniperiph_dai_probe(struct snd_soc_dai *dai) { struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai); struct sti_uniperiph_dai *dai_data = &priv->dai_data; diff --git a/sound/soc/sti/uniperif.h b/sound/soc/sti/uniperif.h index 2a5de328501c..74e51f0ff85c 100644 --- a/sound/soc/sti/uniperif.h +++ b/sound/soc/sti/uniperif.h @@ -1380,6 +1380,7 @@ int uni_reader_init(struct platform_device *pdev, struct uniperif *reader);
/* common */ +int sti_uniperiph_dai_probe(struct snd_soc_dai *dai); int sti_uniperiph_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt);
diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c index dd9013c47664..6d1ce030963c 100644 --- a/sound/soc/sti/uniperif_player.c +++ b/sound/soc/sti/uniperif_player.c @@ -1038,6 +1038,7 @@ static const struct snd_soc_dai_ops uni_player_dai_ops = { .startup = uni_player_startup, .shutdown = uni_player_shutdown, .prepare = uni_player_prepare, + .probe = sti_uniperiph_dai_probe, .trigger = uni_player_trigger, .hw_params = sti_uniperiph_dai_hw_params, .set_fmt = sti_uniperiph_dai_set_fmt, diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c index 065c5f0d1f5f..05ea2b794eb9 100644 --- a/sound/soc/sti/uniperif_reader.c +++ b/sound/soc/sti/uniperif_reader.c @@ -401,6 +401,7 @@ static const struct snd_soc_dai_ops uni_reader_dai_ops = { .startup = uni_reader_startup, .shutdown = uni_reader_shutdown, .prepare = uni_reader_prepare, + .probe = sti_uniperiph_dai_probe, .trigger = uni_reader_trigger, .hw_params = sti_uniperiph_dai_hw_params, .set_fmt = sti_uniperiph_dai_set_fmt, diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c index f4437015d43a..9df49a880b75 100644 --- a/sound/usb/line6/driver.c +++ b/sound/usb/line6/driver.c @@ -286,12 +286,14 @@ static void line6_data_received(struct urb *urb) { struct usb_line6 *line6 = (struct usb_line6 *)urb->context; struct midi_buffer *mb = &line6->line6midi->midibuf_in; + unsigned long flags; int done;
if (urb->status == -ESHUTDOWN) return;
if (line6->properties->capabilities & LINE6_CAP_CONTROL_MIDI) { + spin_lock_irqsave(&line6->line6midi->lock, flags); done = line6_midibuf_write(mb, urb->transfer_buffer, urb->actual_length);
@@ -300,12 +302,15 @@ static void line6_data_received(struct urb *urb) dev_dbg(line6->ifcdev, "%d %d buffer overflow - message skipped\n", done, urb->actual_length); } + spin_unlock_irqrestore(&line6->line6midi->lock, flags);
for (;;) { + spin_lock_irqsave(&line6->line6midi->lock, flags); done = line6_midibuf_read(mb, line6->buffer_message, LINE6_MIDI_MESSAGE_MAXLEN, LINE6_MIDIBUF_READ_RX); + spin_unlock_irqrestore(&line6->line6midi->lock, flags);
if (done <= 0) break; diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 73abc38a5400..f13a8d63a019 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -2594,6 +2594,10 @@ YAMAHA_DEVICE(0x7010, "UB99"), } },
+/* Stanton ScratchAmp */ +{ USB_DEVICE(0x103d, 0x0100) }, +{ USB_DEVICE(0x103d, 0x0101) }, + /* Novation EMS devices */ { USB_DEVICE_VENDOR_SPEC(0x1235, 0x0001), diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c index 920aee41bd58..6cc69900b310 100644 --- a/tools/testing/selftests/bpf/prog_tests/send_signal.c +++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c @@ -156,7 +156,8 @@ static void test_send_signal_tracepoint(bool signal_thread) static void test_send_signal_perf(bool signal_thread) { struct perf_event_attr attr = { - .sample_period = 1, + .freq = 1, + .sample_freq = 1000, .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_CLOCK, }; diff --git a/tools/testing/selftests/devices/ksft.py b/tools/testing/selftests/devices/ksft.py index cd89fb2bc10e..bf215790a89d 100644 --- a/tools/testing/selftests/devices/ksft.py +++ b/tools/testing/selftests/devices/ksft.py @@ -70,7 +70,7 @@ def test_result(condition, description=""):
def finished(): - if ksft_cnt["pass"] == ksft_num_tests: + if ksft_cnt["pass"] + ksft_cnt["skip"] == ksft_num_tests: exit_code = KSFT_PASS else: exit_code = KSFT_FAIL diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile index 3b49bc3d0a3b..478cd26f62fd 100644 --- a/tools/testing/selftests/mm/Makefile +++ b/tools/testing/selftests/mm/Makefile @@ -106,7 +106,7 @@ endif
endif
-ifneq (,$(filter $(ARCH),arm64 ia64 mips64 parisc64 powerpc riscv64 s390x sparc64 x86_64)) +ifneq (,$(filter $(ARCH),arm64 ia64 mips64 parisc64 powerpc riscv64 s390x sparc64 x86_64 s390)) TEST_GEN_FILES += va_high_addr_switch TEST_GEN_FILES += virtual_address_range TEST_GEN_FILES += write_to_hugetlbfs diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh index 7043984b7e74..a3293043c85d 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_join.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh @@ -1415,18 +1415,28 @@ chk_add_nr() local add_nr=$1 local echo_nr=$2 local port_nr=${3:-0} - local syn_nr=${4:-$port_nr} - local syn_ack_nr=${5:-$port_nr} - local ack_nr=${6:-$port_nr} - local mis_syn_nr=${7:-0} - local mis_ack_nr=${8:-0} + local ns_invert=${4:-""} + local syn_nr=$port_nr + local syn_ack_nr=$port_nr + local ack_nr=$port_nr + local mis_syn_nr=0 + local mis_ack_nr=0 + local ns_tx=$ns1 + local ns_rx=$ns2 + local extra_msg="" local count local timeout
- timeout=$(ip netns exec $ns1 sysctl -n net.mptcp.add_addr_timeout) + if [[ $ns_invert = "invert" ]]; then + ns_tx=$ns2 + ns_rx=$ns1 + extra_msg="invert" + fi + + timeout=$(ip netns exec ${ns_tx} sysctl -n net.mptcp.add_addr_timeout)
print_check "add" - count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtAddAddr") + count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtAddAddr") if [ -z "$count" ]; then print_skip # if the test configured a short timeout tolerate greater then expected @@ -1438,7 +1448,7 @@ chk_add_nr() fi
print_check "echo" - count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtEchoAdd") + count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtEchoAdd") if [ -z "$count" ]; then print_skip elif [ "$count" != "$echo_nr" ]; then @@ -1449,7 +1459,7 @@ chk_add_nr()
if [ $port_nr -gt 0 ]; then print_check "pt" - count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtPortAdd") + count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtPortAdd") if [ -z "$count" ]; then print_skip elif [ "$count" != "$port_nr" ]; then @@ -1459,7 +1469,7 @@ chk_add_nr() fi
print_check "syn" - count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinPortSynRx") + count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMPJoinPortSynRx") if [ -z "$count" ]; then print_skip elif [ "$count" != "$syn_nr" ]; then @@ -1470,7 +1480,7 @@ chk_add_nr() fi
print_check "synack" - count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtMPJoinPortSynAckRx") + count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtMPJoinPortSynAckRx") if [ -z "$count" ]; then print_skip elif [ "$count" != "$syn_ack_nr" ]; then @@ -1481,7 +1491,7 @@ chk_add_nr() fi
print_check "ack" - count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinPortAckRx") + count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMPJoinPortAckRx") if [ -z "$count" ]; then print_skip elif [ "$count" != "$ack_nr" ]; then @@ -1492,7 +1502,7 @@ chk_add_nr() fi
print_check "syn" - count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMismatchPortSynRx") + count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMismatchPortSynRx") if [ -z "$count" ]; then print_skip elif [ "$count" != "$mis_syn_nr" ]; then @@ -1503,7 +1513,7 @@ chk_add_nr() fi
print_check "ack" - count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMismatchPortAckRx") + count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMismatchPortAckRx") if [ -z "$count" ]; then print_skip elif [ "$count" != "$mis_ack_nr" ]; then @@ -1513,6 +1523,8 @@ chk_add_nr() print_ok fi fi + + print_info "$extra_msg" }
chk_add_tx_nr() @@ -1977,6 +1989,21 @@ signal_address_tests() chk_add_nr 1 1 fi
+ # uncommon: subflow and signal flags on the same endpoint, e.g. because + # the user wrongly picked both, but still expects the client + # to create additional subflows + if reset "subflow and signal together"; then + pm_nl_set_limits $ns1 0 2 + pm_nl_set_limits $ns2 0 2 + pm_nl_add_endpoint $ns2 10.0.3.2 flags signal,subflow + run_tests $ns1 $ns2 10.0.1.1 + chk_join_nr 1 1 1 + chk_add_nr 1 1 0 invert # only initiated by ns2 + chk_add_nr 0 0 0 # none initiated by ns1 + chk_rst_nr 0 0 invert # no RST sent by the client + chk_rst_nr 0 0 # no RST sent by the server + fi + # accept and use add_addr with additional subflows if reset "multiple subflows and signal"; then pm_nl_set_limits $ns1 0 3