I'm announcing the release of the 6.0.11 kernel.
All users of the 6.0 kernel series must upgrade.
The updated 6.0.y git tree can be found at:
	git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git linux-6.0.y
and can be browsed at the normal kernel.org git web browser:
	https://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git;a=summa...
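For anyone tracking the stable tree with git, the update can be pulled in with the usual commands, roughly like this (just a sketch; the remote name "stable" and the use of an existing kernel checkout are assumptions, adjust to your setup):

	# one-time: add the stable tree as a remote in an existing kernel checkout
	git remote add stable git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git

	# fetch the 6.0.y branch and check out the updated release
	git fetch stable linux-6.0.y
	git checkout -B linux-6.0.y stable/linux-6.0.y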
thanks,
greg k-h
------------
 Documentation/devicetree/bindings/iio/adc/aspeed,ast2600-adc.yaml | 7
 Makefile | 2
 arch/arm/boot/dts/am335x-pcm-953.dtsi | 28 -
 arch/arm/boot/dts/at91sam9g20ek_common.dtsi | 9
 arch/arm/boot/dts/imx6q-prti6q.dts | 4
 arch/arm/mach-mxs/mach-mxs.c | 4
 arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts | 2
 arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts | 7
 arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts | 2
 arch/arm64/include/asm/syscall_wrapper.h | 2
 arch/loongarch/include/asm/pgtable.h | 8
 arch/loongarch/kernel/process.c | 9
 arch/mips/include/asm/fw/fw.h | 2
 arch/mips/pic32/pic32mzda/early_console.c | 13
 arch/mips/pic32/pic32mzda/init.c | 2
 arch/nios2/boot/Makefile | 2
 arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts | 38 +
 arch/riscv/kernel/vdso/Makefile | 3
 arch/riscv/kernel/vdso/vdso.lds.S | 2
 arch/s390/Kconfig | 6
 arch/s390/Makefile | 2
 arch/s390/boot/Makefile | 3
 arch/s390/boot/startup.c | 3
 arch/s390/kernel/crash_dump.c | 2
 arch/x86/hyperv/hv_init.c | 54 +-
 arch/x86/include/asm/cpufeatures.h | 3
 arch/x86/kernel/cpu/tsx.c | 38 -
 arch/x86/kvm/mmu/mmu.c | 13
 arch/x86/kvm/svm/nested.c | 6
 arch/x86/kvm/svm/svm.c | 16
 arch/x86/kvm/vmx/nested.c | 3
 arch/x86/kvm/x86.c | 18
 arch/x86/kvm/xen.c | 32 +
 arch/x86/mm/ioremap.c | 8
 arch/x86/power/cpu.c | 23 -
 block/bfq-cgroup.c | 4
 block/blk-mq.c | 7
 block/blk-settings.c | 1
 block/blk.h | 1
 drivers/acpi/video_detect.c | 14
 drivers/android/binder_alloc.c | 7
 drivers/bus/intel-ixp4xx-eb.c | 9
 drivers/bus/sunxi-rsb.c | 38 -
 drivers/cpufreq/Kconfig.x86 | 2
 drivers/cpufreq/amd-pstate.c | 21
 drivers/dma-buf/dma-buf.c | 23 -
 drivers/dma-buf/dma-heap.c | 28 -
 drivers/fpga/Kconfig | 4
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c | 1
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 4
 drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 1
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 8
 drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 6
 drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 16
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 26 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 26 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 2
 drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 6
 drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 31 -
 drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm | 27 +
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 44 +-
 drivers/gpu/drm/amd/display/dc/dc.h | 3
 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | 3
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c | 213 ++++------
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c | 1
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c | 11
 drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c | 1
 drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 22 -
 drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c | 1
 drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c | 2
 drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h | 2
 drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c | 8
 drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c | 14
 drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c | 9
 drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c | 25 -
 drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 4
 drivers/gpu/drm/display/drm_dp_mst_topology.c | 2
 drivers/gpu/drm/drm_panel_orientation_quirks.c | 12
 drivers/gpu/drm/i915/display/intel_display_power.c | 8
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 4
 drivers/gpu/drm/i915/gt/intel_gt.c | 5
 drivers/gpu/drm/i915/gvt/kvmgt.c | 3
 drivers/gpu/drm/tegra/drm.c | 4
 drivers/gpu/host1x/dev.c | 4
 drivers/hv/channel_mgmt.c | 6
 drivers/hv/vmbus_drv.c | 1
 drivers/iio/accel/bma400_core.c | 4
 drivers/iio/adc/aspeed_adc.c | 11
 drivers/iio/industrialio-sw-trigger.c | 6
 drivers/iio/light/apds9960.c | 12
 drivers/input/misc/soc_button_array.c | 14
 drivers/input/mouse/synaptics.c | 1
 drivers/input/serio/i8042-x86ia64io.h | 8
 drivers/input/touchscreen/goodix.c | 11
 drivers/md/dm-integrity.c | 21
 drivers/md/dm-log-writes.c | 1
 drivers/net/arcnet/com20020_cs.c | 11
 drivers/net/bonding/bond_main.c | 17
 drivers/net/can/usb/gs_usb.c | 39 -
 drivers/net/dsa/sja1105/sja1105_mdio.c | 6
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 12
 drivers/net/ethernet/cavium/liquidio/lio_main.c | 4
 drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 4
 drivers/net/ethernet/davicom/dm9051.c | 4
 drivers/net/ethernet/engleder/tsnep_main.c | 57 ++
 drivers/net/ethernet/freescale/enetc/enetc.c | 32 -
 drivers/net/ethernet/freescale/enetc/enetc.h | 10
 drivers/net/ethernet/freescale/enetc/enetc_qos.c | 77 +--
 drivers/net/ethernet/intel/iavf/iavf.h | 1
 drivers/net/ethernet/intel/iavf/iavf_main.c | 41 +
 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 8
 drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 3
 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 2
 drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c | 7
 drivers/net/ethernet/marvell/prestera/prestera_main.c | 1
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 12
 drivers/net/ethernet/mellanox/mlx4/qp.c | 3
 drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 47 +-
 drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h | 45 ++
 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 2
 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c | 16
 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h | 3
 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 17
 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 2
 drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c | 9
 drivers/net/ethernet/mellanox/mlx5/core/main.c | 9
 drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c | 88 ++++
 drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c | 14
 drivers/net/ethernet/netronome/nfp/nfp_devlink.c | 2
 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 3
 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 6
 drivers/net/ethernet/qlogic/qla3xxx.c | 1
 drivers/net/ethernet/sfc/ef100_netdev.c | 1
 drivers/net/ipvlan/ipvlan.h | 1
 drivers/net/ipvlan/ipvlan_main.c | 2
 drivers/net/macsec.c | 28 -
 drivers/net/phy/at803x.c | 4
 drivers/net/usb/cdc_ncm.c | 1
 drivers/net/usb/qmi_wwan.c | 1
 drivers/net/virtio_net.c | 3
 drivers/net/wireless/ath/ath11k/qmi.h | 2
 drivers/net/wireless/cisco/airo.c | 18
 drivers/net/wireless/mac80211_hwsim.c | 5
 drivers/net/wireless/microchip/wilc1000/cfg80211.c | 39 +
 drivers/net/wireless/microchip/wilc1000/hif.c | 21
 drivers/net/wwan/iosm/iosm_ipc_coredump.c | 1
 drivers/net/wwan/iosm/iosm_ipc_devlink.c | 1
 drivers/net/wwan/iosm/iosm_ipc_pcie.c | 2
 drivers/net/wwan/t7xx/t7xx_modem_ops.c | 2
 drivers/nfc/st-nci/se.c | 49 +-
 drivers/nvme/host/core.c | 3
 drivers/nvme/host/pci.c | 2
 drivers/nvme/target/configfs.c | 7
 drivers/pci/controller/pci-hyperv.c | 90 +++-
 drivers/pinctrl/qcom/pinctrl-sc8280xp.c | 4
 drivers/platform/surface/surface_aggregator_registry.c | 37 +
 drivers/platform/x86/acer-wmi.c | 9
 drivers/platform/x86/asus-wmi.c | 2
 drivers/platform/x86/hp-wmi.c | 3
 drivers/platform/x86/ideapad-laptop.c | 62 ++
 drivers/platform/x86/intel/hid.c | 3
 drivers/platform/x86/intel/pmt/class.c | 31 +
 drivers/platform/x86/thinkpad_acpi.c | 8
 drivers/platform/x86/touchscreen_dmi.c | 25 +
 drivers/power/supply/ab8500_btemp.c | 9
 drivers/power/supply/ip5xxx_power.c | 2
 drivers/regulator/core.c | 8
 drivers/regulator/rt5759-regulator.c | 1
 drivers/regulator/twl6030-regulator.c | 2
 drivers/s390/block/dasd_eckd.c | 6
 drivers/s390/crypto/ap_bus.c | 5
 drivers/s390/crypto/zcrypt_msgtype6.c | 21
 drivers/scsi/ibmvscsi/ibmvfc.c | 14
 drivers/scsi/mpi3mr/mpi3mr_os.c | 3
 drivers/scsi/scsi_debug.c | 7
 drivers/scsi/scsi_transport_iscsi.c | 31 -
 drivers/scsi/storvsc_drv.c | 69 +--
 drivers/spi/spi-dw-dma.c | 3
 drivers/spi/spi-imx.c | 13
 drivers/spi/spi-stm32.c | 2
 drivers/spi/spi-tegra210-quad.c | 9
 drivers/tee/optee/device.c | 2
 drivers/tty/n_gsm.c | 69 +--
 drivers/tty/serial/8250/8250_omap.c | 7
 drivers/usb/cdns3/cdnsp-gadget.c | 12
 drivers/usb/cdns3/cdnsp-ring.c | 17
 drivers/usb/dwc3/dwc3-exynos.c | 11
 drivers/usb/dwc3/gadget.c | 22 -
 drivers/virt/coco/sev-guest/sev-guest.c | 84 +++
 drivers/xen/platform-pci.c | 7
 drivers/xen/xen-pciback/conf_space_capability.c | 9
 fs/btrfs/ioctl.c | 23 -
 fs/btrfs/sysfs.c | 7
 fs/btrfs/tree-log.c | 59 ++
 fs/btrfs/zoned.c | 9
 fs/ceph/caps.c | 50 --
 fs/cifs/cifsfs.c | 4
 fs/cifs/sess.c | 4
 fs/ext4/extents.c | 18
 fs/fs-writeback.c | 30 -
 fs/fscache/volume.c | 7
 fs/fuse/file.c | 37 -
 fs/nfsd/vfs.c | 7
 fs/nilfs2/sufile.c | 8
 fs/zonefs/super.c | 60 ++
 fs/zonefs/zonefs.h | 6
 include/linux/blkdev.h | 1
 include/linux/bpf.h | 39 +
 include/linux/fault-inject.h | 7
 include/linux/fscache.h | 2
 include/linux/mlx5/driver.h | 1
 include/net/neighbour.h | 2
 include/sound/sof/info.h | 4
 include/uapi/linux/audit.h | 2
 init/Kconfig | 2
 io_uring/filetable.c | 2
 io_uring/io_uring.h | 9
 io_uring/poll.c | 49 +-
 kernel/bpf/dispatcher.c | 22 -
 kernel/gcov/clang.c | 2
 lib/Kconfig.debug | 1
 lib/fault-inject.c | 13
 lib/vdso/Makefile | 2
 mm/damon/sysfs.c | 4
 mm/failslab.c | 12
 mm/memcontrol.c | 2
 mm/page_alloc.c | 7
 mm/vmscan.c | 24 -
 net/9p/trans_fd.c | 2
 net/core/flow_dissector.c | 2
 net/core/neighbour.c | 58 +-
 net/dccp/ipv4.c | 2
 net/dccp/ipv6.c | 2
 net/ipv4/Kconfig | 10
 net/ipv4/esp4_offload.c | 3
 net/ipv4/fib_trie.c | 4
 net/ipv4/inet_hashtables.c | 10
 net/ipv4/ip_input.c | 5
 net/ipv4/netfilter/ipt_CLUSTERIP.c | 4
 net/ipv4/tcp_ipv4.c | 2
 net/ipv6/esp6_offload.c | 3
 net/ipv6/tcp_ipv6.c | 2
 net/ipv6/xfrm6_policy.c | 6
 net/key/af_key.c | 34 +
 net/mac80211/main.c | 8
 net/mac80211/mesh_pathtbl.c | 2
 net/netfilter/ipset/ip_set_hash_gen.h | 2
 net/netfilter/ipset/ip_set_hash_ip.c | 8
 net/netfilter/nf_conntrack_core.c | 2
 net/netfilter/nf_conntrack_netlink.c | 24 -
 net/netfilter/nf_conntrack_standalone.c | 2
 net/netfilter/nf_flow_table_offload.c | 4
 net/netfilter/nf_tables_api.c | 6
 net/netfilter/nft_ct.c | 6
 net/netfilter/xt_connmark.c | 18
 net/nfc/nci/core.c | 2
 net/nfc/nci/data.c | 4
 net/openvswitch/conntrack.c | 8
 net/rxrpc/ar-internal.h | 1
 net/rxrpc/conn_client.c | 38 +
 net/sched/Kconfig | 2
 net/sched/act_connmark.c | 4
 net/sched/act_ct.c | 8
 net/sched/act_ctinfo.c | 6
 net/tipc/discover.c | 5
 net/tipc/topsrv.c | 20
 net/wireless/util.c | 6
 net/xfrm/xfrm_device.c | 15
 net/xfrm/xfrm_replay.c | 2
 sound/hda/intel-dsp-config.c | 5
 sound/soc/amd/yc/acp6x-mach.c | 7
 sound/soc/codecs/hdac_hda.h | 4
 sound/soc/codecs/max98373-i2c.c | 4
 sound/soc/codecs/sgtl5000.c | 1
 sound/soc/intel/boards/bytcht_es8316.c | 7
 sound/soc/intel/boards/sof_es8336.c | 60 ++
 sound/soc/intel/common/soc-acpi-intel-icl-match.c | 13
 sound/soc/soc-pcm.c | 5
 sound/soc/sof/ipc3-topology.c | 15
 sound/soc/stm/stm32_adfsdm.c | 11
 sound/usb/endpoint.c | 3
 sound/usb/quirks.c | 2
 sound/usb/usbaudio.h | 3
 tools/iio/iio_generic_buffer.c | 4
 tools/testing/selftests/bpf/verifier/ref_tracking.c | 36 +
 tools/testing/selftests/net/io_uring_zerocopy_tx.sh | 2
 tools/testing/selftests/net/mptcp/mptcp_join.sh | 6
 tools/testing/selftests/net/mptcp/mptcp_sockopt.sh | 9
 tools/testing/selftests/net/mptcp/simult_flows.sh | 5
 tools/testing/selftests/net/udpgro.sh | 4
 tools/testing/selftests/net/udpgro_bench.sh | 2
 tools/testing/selftests/net/udpgro_frglist.sh | 2
 virt/kvm/pfncache.c | 7
 293 files changed, 2598 insertions(+), 1226 deletions(-)
Adrien Thierry (1): selftests/net: give more time to udpgro bg processes to complete startup
Ai Chao (1): ALSA: usb-audio: add quirk to fix Hamedal C20 disconnect issue
Alejandro Concepción Rodríguez (1): iio: light: apds9960: fix wrong register for gesture gain
Aleksandr Miloserdov (1): nvmet: fix memory leak in nvmet_subsys_attr_model_store_locked
Alex Deucher (2):
      drm/amdgpu/psp: don't free PSP buffers on suspend
      drm/amdgpu: Partially revert "drm/amdgpu: update drm_display_info correctly when the edid is read"
Alexandre Belloni (1): init/Kconfig: fix CC_HAS_ASM_GOTO_TIED_OUTPUT test with dash
Alvin Lee (2):
      drm/amd/display: Add debug option for allocating extra way for cursor
      drm/amd/display: Update MALL SS NumWays calculation
Aman Dhoot (1): Input: synaptics - switch touchpad on HP Laptop 15-da3001TU to RMI mode
Anand Jain (3):
      btrfs: free btrfs_path before copying inodes to userspace
      btrfs: free btrfs_path before copying fspath to userspace
      btrfs: free btrfs_path before copying subvol info to userspace
Andreas Kemnade (1): regulator: twl6030: re-add TWL6032_SUBCLASS
Andrzej Hajda (1): drm/i915: fix TLB invalidation for Gen12 video and compute engines
Aneesh Kumar K.V (1): mm/cgroup/reclaim: fix dirty pages throttling on cgroup v1
Anjana Hari (1): pinctrl: qcom: sc8280xp: Rectify UFS reset pins
Arnav Rawat (1): platform/x86: ideapad-laptop: Fix interrupt storm on fn-lock toggle on some Yoga laptops
Asher Song (1): Revert "drm/amdgpu: Revert "drm/amdgpu: getting fan speed pwm for vega10 properly""
Aurabindo Pillai (1): drm/amd/display: Zeromem mypipe heap struct before using it
Baokun Li (1): ext4: fix use-after-free in ext4_ext_shift_extents
Bart Van Assche (1): scsi: scsi_debug: Make the READ CAPACITY response compliant with ZBC
Billy Tsai (2):
      iio: adc: aspeed: Remove the trim valid dts property.
      dt-bindings: iio: adc: Remove the property "aspeed,trim-data-valid"
Brent Mendelsohn (1): ASoC: amd: yc: Add Alienware m17 R5 AMD into DMI table
Brian King (1): scsi: ibmvfc: Avoid path failures during live migration
Carlos Llamas (1): binder: validate alloc->mm in ->mmap() handler
Chaitanya Dhere (1): drm/amd/display: Fix FCLK deviation and tool compile issues
Chen Zhongjin (3):
      xfrm: Fix ignored return value in xfrm6_init()
      iio: core: Fix entry not deleted when iio_register_sw_trigger_type() fails
      nilfs2: fix nilfs_sufile_mark_dirty() not set segment usage as dirty
Chen-Yu Tsai (1): arm64: dts: rockchip: Fix Pine64 Quartz4-B PMIC interrupt
ChenXiaoSong (1): cifs: fix missing unlock in cifs_file_copychunk_range()
Chris Mi (1): net/mlx5e: Offload rule only when all encaps are valid
Christian König (1): drm/amdgpu: always register an MMU notifier for userptr
Christian Langrock (1): xfrm: replay: Fix ESN wrap around for GSO
Christoph Hellwig (3):
      blk-mq: fix queue reference leak on blk_mq_alloc_disk_for_queue failure
      btrfs: zoned: fix missing endianness conversion in sb_write_pointer
      btrfs: use kvcalloc in btrfs_get_dev_zone_info
Chuck Lever (1): NFSD: Fix reads with a non-zero offset that don't end on a page boundary
Damien Le Moal (2):
      zonefs: Fix active zone accounting
      zonefs: fix zone report size in __zonefs_io_error()
Dan Carpenter (1): cifs: Use after free in debug code
Daniel Xu (1): netfilter: conntrack: Fix data-races around ct mark
David Belanger (1): drm/amdgpu: Enable SA software trap.
David E. Box (1): platform/x86/intel/pmt: Sapphire Rapids PMT errata fix
David Howells (2):
      rxrpc: Fix race between conn bundle lookup and bundle removal [ZDI-CAN-15975]
      fscache: fix OOB Read in __fscache_acquire_volume
David Woodhouse (3):
      KVM: x86/xen: Only do in-kernel acceleration of hypercalls for guest CPL0
      KVM: x86/xen: Validate port number in SCHEDOP_poll
      KVM: Update gfn_to_pfn_cache khva when it moves within the same page
Dawei Li (1): dma-buf: fix racing conflict of dma_heap_add()
Detlev Casanova (1): ASoC: sgtl5000: Reset the CHIP_CLK_CTRL reg on remove
Dexuan Cui (1): PCI: hv: Only reuse existing IRTE allocation for Multi-MSI
Diana Wang (1): nfp: fill splittable of devlink_port_attrs correctly
Dillon Varone (2):
      drm/amd/display: use uclk pstate latency for fw assisted mclk validation dcn32
      drm/amd/display: Update soc bounding box for dcn32/dcn321
Dominik Haller (1): ARM: dts: am335x-pcm-953: Define fixed regulators in root node
Dong Chenchen (1): iio: accel: bma400: Fix memory leak in bma400_get_steps_reg()
Emil Renner Berthing (1): riscv: dts: sifive unleashed: Add PWM controlled LEDs
Enrico Sau (1): net: usb: qmi_wwan: add Telit 0x103a composition
Eric Huang (1): drm/amdkfd: Fix a memory limit issue
Eyal Birger (1): xfrm: fix "disable_policy" on ipv4 early demux
Fabio Estevam (1): ARM: dts: imx6q-prti6q: Fix ref/tcxo-clock-frequency properties
Fedor Pchelkin (2):
      Revert "tty: n_gsm: avoid call of sleeping functions from atomic context"
      Revert "tty: n_gsm: replace kicktimer with delayed_work"
Felix Fietkau (1): netfilter: flowtable_offload: add missing locking
Filipe Manana (1): btrfs: do not modify log tree while holding a leaf from fs tree locked
Frieder Schrempf (1): spi: spi-imx: Fix spi_bus_clk if requested clock is higher than input clock
Gaosheng Cui (1): audit: fix undefined behavior in bit shift for AUDIT_BIT
George Shen (1): drm/amd/display: Fix calculation for cursor CAB allocation
Gerhard Engleder (1): tsnep: Fix rotten packets
Gleb Mazovetskiy (1): tcp: configurable source port perturb table size
Greg Kroah-Hartman (2):
      lib/vdso: use "grep -E" instead of "egrep"
      Linux 6.0.11
Guchun Chen (1): drm/amdgpu: disable BACO support on more cards
Hamza Mahfooz (1): drm/amd/display: only fill dirty rectangles when PSR is enabled
Hangbin Liu (1): bonding: fix ICMPv6 header handling when receiving IPv6 messages
Hanjun Guo (1): net: wwan: t7xx: Fix the ACPI memory leak
Hans de Goede (10):
      ACPI: video: Add backlight=native DMI quirk for Dell G15 5515
      platform/x86: touchscreen_dmi: Add info for the RCA Cambio W101 v2 2-in-1
      drm: panel-orientation-quirks: Add quirk for Nanote UMPC-01
      drm: panel-orientation-quirks: Add quirk for Acer Switch V 10 (SW5-017)
      ASoC: Intel: bytcht_es8316: Add quirk for the Nanote UMPC-01
      Input: goodix - try resetting the controller when no config is set
      Input: soc_button_array - add use_low_level_irq module parameter
      Input: soc_button_array - add Acer Switch V 10 to dmi_use_low_level_irq[]
      platform/x86: acer-wmi: Enable SW_TABLET_MODE on Switch V 10 (SW5-017)
      platform/x86: ideapad-laptop: Add module parameters to match DMI quirk tables
Harald Freudenberger (1): s390/zcrypt: fix warning about field-spanning write
Heiko Carstens (2):
      s390: always build relocatable kernel
      s390/crashdump: fix TOD programmable field size
Herbert Xu (1): af_key: Fix send_acquire race with pfkey_register
Huacai Chen (2):
      LoongArch: Clear FPU/SIMD thread info flags for kernel thread
      LoongArch: Set _PAGE_DIRTY only if _PAGE_WRITE is set in {pmd,pte}_mkdirty()
Hui Tang (1): net: mvpp2: fix possible invalid pointer dereference
Imre Deak (1): drm/i915: Fix warn in intel_display_power_*_domain() functions
Ivan Hu (1): platform/x86/intel/hid: Add some ACPI device IDs
Ivan Vecera (2):
      iavf: Fix a crash during reset task
      iavf: Do not restart Tx queues after reset task failure
Jack Xiao (1): drm/amd/amdgpu: reserve vm invalidation engine for firmware
Jaco Coetzee (1): nfp: add port from netdev validation for EEPROM access
Jakob Unterwurzacher (1): arm64: dts: rockchip: lower rk3399-puma-haikou SD controller clock frequency
Jason A. Donenfeld (2):
      wifi: airo: do not assign -1 to unsigned char
      MIPS: pic32: treat port as signed integer
Jason Ekstrand (1): dma-buf: Use dma_fence_unwrap_for_each when importing fences
Jay Cornwall (1): drm/amdkfd: update GFX11 CWSR trap handler
Jens Axboe (1): io_uring: clear TIF_NOTIFY_SIGNAL if set and task_work not available
Jiasheng Jiang (2):
      ASoC: max98373: Add checks for devm_kcalloc
      octeontx2-pf: Add check for devm_kcalloc
Johannes Weiner (1): mm: vmscan: fix extreme overreclaim and swap floods
Jon Hunter (1): spi: tegra210-quad: Don't initialise DMA if not supported
Jonas Jelonek (1): wifi: mac80211_hwsim: fix debugfs attribute ps with rc table support
Josef Bacik (1): btrfs: free btrfs_path before copying root refs to userspace
Jozsef Kadlecsik (1): netfilter: ipset: restore allowing 64 clashing elements in hash:net,iface
Junxiao Chang (1): ASoC: hdac_hda: fix hda pcm buffer overflow issue
Kai Vehmanen (1): ASoC: SOF: ipc3-topology: use old pipeline teardown flow with SOF2.1 and older
Kai-Heng Feng (1): platform/x86: hp-wmi: Ignore Smart Experience App event
Kazuki Takiguchi (1): KVM: x86/mmu: Fix race condition in direct_page_fault
Keith Busch (4):
      nvme: quiet user passthrough command errors
      block: make blk_set_default_limits() private
      dm-integrity: set dma_alignment limit in io_hints
      dm-log-writes: set dma_alignment limit in io_hints
Kenneth Lee (1): ceph: Use kcalloc for allocating multiple elements
Krishna Yarlagadda (1): spi: tegra210-quad: Fix duplicate resource error
Kuniyuki Iwashima (2):
      arm64/syscall: Include asm/ptrace.h in syscall_wrapper header.
      dccp/tcp: Reset saddr on failure after inet6?_hash_connect().
Lee, Alvin (1): drm/amd/display: Added debug option for forcing subvp num ways
Lennard Gäher (1): platform/x86: thinkpad_acpi: Enable s2idle quirk for 21A1 machine type
Leon Romanovsky (1): net: liquidio: simplify if expression
Lev Popov (1): arm64: dts: rockchip: fix quartz64-a bluetooth configuration
Li Hua (1): test_kprobes: fix implicit declaration error of test_kprobes
Li Liguang (1): mm: correctly charge compressed memory to its memcg
Li Zetao (1): virtio_net: Fix probe failed when modprobe virtio_net
Lin Ma (3):
      nfc/nci: fix race with opening and closing
      io_uring/filetable: fix file reference underflow
      io_uring/poll: fix poll_refs race with cancelation
Linus Walleij (2):
      power: supply: ab8500: Defer thermal zone probe
      bus: ixp4xx: Don't touch bit 7 on IXP42x
Liu Jian (2):
      net: ethernet: mtk_eth_soc: fix error handling in mtk_open()
      net: sparx5: fix error handling in sparx5_port_open()
Liu Shixin (1): NFC: nci: fix memory leak in nci_rx_data_packet()
Lukas Wunner (1): serial: 8250: 8250_omap: Avoid RS485 RTS glitch on ->set_termios()
Lyude Paul (2):
      drm/display/dp_mst: Fix drm_dp_mst_add_affected_dsc_crtcs() return code
      drm/amd/dc/dce120: Fix audio register mapping, stop triggering KASAN
M Chetan Kumar (1): net: wwan: iosm: fix kernel test robot reported errors
Mahesh Bandewar (1): ipvlan: hold lower dev to avoid possible use-after-free
Manyi Li (1): platform/x86: ideapad-laptop: Disable touchpad_switch
Marc Kleine-Budde (1): spi: spi-imx: spi_imx_transfer_one(): check for DMA transfer first
Marek Marczykowski-Górecki (1): xen-pciback: Allow setting PCI_MSIX_FLAGS_MASKALL too
Marek Szyprowski (1): usb: dwc3: exynos: Fix remove() function
Martin Faltesek (3):
      nfc: st-nci: fix incorrect validating logic in EVT_TRANSACTION
      nfc: st-nci: fix memory leaks in EVT_TRANSACTION
      nfc: st-nci: fix incorrect sizing calculations in EVT_TRANSACTION
Matthew Auld (1): drm/i915/ttm: never purge busy objects
Matthieu Baerts (2):
      selftests: mptcp: run mptcp_sockopt from a new netns
      selftests: mptcp: fix mibit vs mbit mix up
Matti Vaittinen (1): tools: iio: iio_generic_buffer: Fix read size
Maxim Levitsky (5):
      KVM: x86: nSVM: leave nested mode on vCPU free
      KVM: x86: forcibly leave nested mode on vCPU reset
      KVM: x86: nSVM: harden svm_free_nested against freeing vmcb02 while still in use
      KVM: x86: add kvm_leave_nested
      KVM: x86: remove exit_int_info warning in svm_handle_exit
Maximilian Luz (2):
      platform/surface: aggregator_registry: Add support for Surface Pro 9
      platform/surface: aggregator_registry: Add support for Surface Laptop 5
Michael Grzeschik (2):
      ARM: dts: at91: sam9g20ek: enable udc vbus gpio pinctrl
      usb: dwc3: gadget: conditionally remove requests
Michael Kelley (2):
      scsi: storvsc: Fix handling of srb_status and capacity change events
      x86/ioremap: Fix page aligned size calculation in __ioremap_caller()
Miklos Szeredi (1): fuse: lock inode unconditionally in fuse_fallocate()
Mikulas Patocka (2):
      dm integrity: flush the journal on suspend
      dm integrity: clear the journal on suspend
Moshe Shemesh (4):
      net/mlx5: Fix FW tracer timestamp calculation
      net/mlx5: cmdif, Print info on any firmware cmd failure to tracepoint
      net/mlx5: Fix handling of entry refcount when command is not issued to FW
      net/mlx5: Fix sync reset event handler error flow
Mukesh Ojha (1): gcov: clang: fix the buffer overflow issue
Nathan Chancellor (2):
      RISC-V: vdso: Do not add missing symbols to version section in linker script
      bpf: Add explicit cast to 'void *' for __BPF_DISPATCHER_UPDATE()
Nicolas Cavallari (1): wifi: mac80211: Fix ack frame idr leak when mesh has no route
Olivier Moysan (1): ASoC: stm32: dfsdm: manage cb buffers cleanup
Ondrej Jirman (1): power: supply: ip5xxx: Fix integer overflow in current_now calculation
Pablo Neira Ayuso (1): netfilter: nf_tables: do not set up extensions for end interval
Paolo Abeni (1): selftests: mptcp: gives slow test-case more time
Paul Zhang (1): wifi: cfg80211: Fix bitrates overflow issue
Pavel Begunkov (4):
      selftests/net: don't tests batched TCP io_uring zc
      io_uring/poll: lockdep annote io_poll_req_insert_locked
      io_uring: cmpxchg for poll arm refs release
      io_uring: make poll refs more robust
Pawan Gupta (2):
      x86/tsx: Add a feature bit for TSX control MSR support
      x86/pm: Add enumeration check before spec MSRs save/restore setup
Pawel Laszczak (2):
      usb: cdnsp: Fix issue with Clear Feature Halt Endpoint
      usb: cdnsp: fix issue with ZLP - added TD_SIZE = 1
Perry Yuan (1): cpufreq: amd-pstate: change amd-pstate driver to be built-in type
Peter Gonda (1): virt/sev-guest: Prevent IV reuse in the SNP guest driver
Peter Kosyh (1): net/mlx4: Check retval of mlx4_bitmap_init
Peter Zijlstra (1): bpf: Convert BPF_DISPATCHER to use static_call() (not ftrace)
Phil Turnbull (4):
      wifi: wilc1000: validate pairwise and authentication suite offsets
      wifi: wilc1000: validate length of IEEE80211_P2P_ATTR_OPER_CHANNEL attribute
      wifi: wilc1000: validate length of IEEE80211_P2P_ATTR_CHANNEL_LIST attribute
      wifi: wilc1000: validate number of channels
Philip Yang (1): drm/amdgpu: Drop eviction lock when allocating PT BO
Pierre-Louis Bossart (2):
      ASoC: Intel: soc-acpi: add ES83x6 support to IceLake
      ASoC: hda: intel-dsp-config: add ES83x6 quirk for IceLake
Qi Zheng (1): mm: fix unexpected changes to {failslab|fail_page_alloc}.attr
Ramesh Errabolu (1): drm/amdgpu: Enable Aldebaran devices to report CU Occupancy
Randy Dunlap (1): nios2: add FORCE for vmlinuz.gz
Richard Fitzgerald (1): ASoC: soc-pcm: Don't zero TDM masks in __soc_pcm_open()
Robin Murphy (1): gpu: host1x: Avoid trying to use GART on Tegra20
Roi Dayan (1): net/mlx5: E-Switch, Set correctly vport destination
Roy Novich (1): net/mlx5: Do not query pci info while pci disabled
Russ Weight (1): fpga: m10bmc-sec: Fix kconfig dependencies
Sabrina Dubroca (1): Revert "net: macsec: report real_dev features when HW offloading is enabled"
Samuel Holland (2):
      bus: sunxi-rsb: Remove the shutdown callback
      bus: sunxi-rsb: Support atomic transfers
Santiago Ruano Rincón (1): net/cdc_ncm: Fix multicast RX support for CDC NCM devices with ZLP
Sean Christopherson (1): drm/i915/gvt: Get reference to KVM iff attachment to VM is successful
Sean Nyekjaer (1): spi: stm32: fix stm32_spi_prepare_mbr() that halves spi clk for every run
SeongJae Park (1): mm/damon/sysfs-schemes: skip stats update if the scheme directory is removed
Shay Drory (1): net/mlx5: SF: Fix probing active SFs during driver probe phase
Shin'ichiro Kawasaki (1): scsi: mpi3mr: Suppress command reply debug prints
Slawomir Laba (1): iavf: Fix race condition between iavf_shutdown and iavf_remove
Stefan Assmann (1): iavf: remove INITIAL_MAC_SET to allow gARP to work properly
Stefan Haberland (1): s390/dasd: fix no record found for raw_track_access
Steve Su (1): drm/amd/display: Fix gpio port mapping issue
Svyatoslav Feldsherov (1): fs: do not update freeing inode i_io_list
Takashi Iwai (1): Input: i8042 - apply probe defer to more ASUS ZenBook models
Thinh Nguyen (2):
      usb: dwc3: gadget: Return -ESHUTDOWN on ep disable
      usb: dwc3: gadget: Clear ep descriptor last
Thomas Jarosch (1): xfrm: Fix oops in __xfrm_state_delete()
Thomas Zeitlhofer (1): net: neigh: decrement the family specific qlen
Tsung-hua Lin (1): drm/amd/display: No display after resume from WB/CB
Tyler J. Stachecki (1): wifi: ath11k: Fix QCN9074 firmware boot on x86
Vasanth Sadhasivan (1): can: gs_usb: remove dma allocations
Vishwanath Pai (1): netfilter: ipset: regression in ip_set_hash_ip.c
Vitaly Kuznetsov (1): x86/hyperv: Restore VP assist page after cpu offlining/onlining
Vladimir Oltean (3):
      net: dsa: sja1105: disallow C45 transactions on the BASE-TX MDIO bus
      net: enetc: cache accesses to &priv->si->hw
      net: enetc: preserve TX ring priority across reconfiguration
Wang Hai (2):
      net: pch_gbe: fix potential memleak in pch_gbe_tx_queue()
      arcnet: fix potential memory leak in com20020_probe()
Wang ShaoBo (1): net: wwan: iosm: use ACPI_FREE() but not kfree() in ipc_pcie_read_bios_cfg()
Wei Yongjun (2):
      net: phy: at803x: fix error return code in at803x_probe()
      s390/ap: fix memory leak in ap_init_qci_info()
Wyes Karny (1): cpufreq: amd-pstate: cpufreq: amd-pstate: reset MSR_AMD_PERF_CTL register at init
Xin Long (3):
      tipc: set con sock in tipc_conn_alloc
      tipc: add an extra conn_get in tipc_conn_alloc
      net: sched: allow act_ct to be built without NF_NAT
Xiongfeng Wang (3):
      spi: dw-dma: decrease reference count in dw_spi_dma_init_mfld()
      octeontx2-af: Fix reference count issue in rvu_sdp_init()
      platform/x86: asus-wmi: add missing pci_dev_put() in asus_wmi_set_xusb2pr()
Xiubo Li (1): ceph: fix NULL pointer dereference for req->r_session
Yan Cangang (1): net: ethernet: mtk_eth_soc: fix resource leak in error path
Yang Yingliang (8):
      regulator: rt5759: fix OOB in validate_desc()
      regulator: core: fix UAF in destroy_regulator()
      tee: optee: fix possible memory leak in optee_register_device()
      octeontx2-af: debugsfs: fix pci device refcount leak
      net: pch_gbe: fix pci device refcount leak while module exiting
      Drivers: hv: vmbus: fix double free in the error path of vmbus_add_channel_work()
      Drivers: hv: vmbus: fix possible memory leak in vmbus_device_register()
      bnx2x: fix pci device refcount leak in bnx2x_vf_is_pcie_pending()
Youlin Li (1): selftests/bpf: Add verifier test for release_reference()
Yu Kuai (1): block, bfq: fix null pointer dereference in bfq_bio_bfqg()
Yu Liao (1): net: thunderx: Fix the ACPI memory leak
Yuan Can (1): net: dm9051: Fix missing dev_kfree_skb() in dm9051_loop_rx()
YueHaibing (2):
      macsec: Fix invalid error code set
      tipc: check skb_linearize() return value in tipc_disc_rcv()
Zeng Heng (1): regulator: core: fix kobject release warning and memory leak in regulator_register()
Zhang Changzhong (3):
      net/qla3xxx: fix potential memleak in ql3xxx_send()
      sfc: fix potential memleak in __ef100_hard_start_xmit()
      net: marvell: prestera: add missing unregister_netdev() in prestera_port_create()
Zhang Xiaoxu (1): zonefs: Fix race between modprobe and mount
Zhen Lei (1): btrfs: sysfs: normalize the error handling branch in btrfs_init_sysfs()
Zheng Yongjun (1): ARM: mxs: fix memory leak in mxs_machine_init()
Zhengchao Shao (1): 9p/fd: fix issue of list_del corruption in p9_fd_cancel()
Zhou Guanghui (1): scsi: iscsi: Fix possible memory leak when device_register() failed
Zhu Ning (1): ASoC: sof_es8336: reduce pop noise on speaker
Ziyang Xuan (2):
      net: ethernet: mtk_eth_soc: fix potential memory leak in mtk_rx_alloc()
      ipv4: Fix error return code in fib_table_insert()
ruanjinjie (1): xen/platform-pci: add missing free_irq() in error path
taozhang (1): wifi: mac80211: fix memory free error when registering wiphy fail
diff --git a/Documentation/devicetree/bindings/iio/adc/aspeed,ast2600-adc.yaml b/Documentation/devicetree/bindings/iio/adc/aspeed,ast2600-adc.yaml index b283c8ca2bbf..5c08d8b6e995 100644 --- a/Documentation/devicetree/bindings/iio/adc/aspeed,ast2600-adc.yaml +++ b/Documentation/devicetree/bindings/iio/adc/aspeed,ast2600-adc.yaml @@ -62,13 +62,6 @@ properties: description: Inform the driver that last channel will be used to sensor battery.
- aspeed,trim-data-valid: - type: boolean - description: | - The ADC reference voltage can be calibrated to obtain the trimming - data which will be stored in otp. This property informs the driver that - the data store in the otp is valid. - required: - compatible - reg diff --git a/Makefile b/Makefile index 4f7da26fef78..9fecb094c28a 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 6 PATCHLEVEL = 0 -SUBLEVEL = 10 +SUBLEVEL = 11 EXTRAVERSION = NAME = Hurr durr I'ma ninja sloth
diff --git a/arch/arm/boot/dts/am335x-pcm-953.dtsi b/arch/arm/boot/dts/am335x-pcm-953.dtsi index dae448040a97..947497413977 100644 --- a/arch/arm/boot/dts/am335x-pcm-953.dtsi +++ b/arch/arm/boot/dts/am335x-pcm-953.dtsi @@ -12,22 +12,20 @@ / { compatible = "phytec,am335x-pcm-953", "phytec,am335x-phycore-som", "ti,am33xx";
/* Power */ - regulators { - vcc3v3: fixedregulator@1 { - compatible = "regulator-fixed"; - regulator-name = "vcc3v3"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - regulator-boot-on; - }; + vcc3v3: fixedregulator1 { + compatible = "regulator-fixed"; + regulator-name = "vcc3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + };
- vcc1v8: fixedregulator@2 { - compatible = "regulator-fixed"; - regulator-name = "vcc1v8"; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <1800000>; - regulator-boot-on; - }; + vcc1v8: fixedregulator2 { + compatible = "regulator-fixed"; + regulator-name = "vcc1v8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-boot-on; };
/* User IO */ diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi index 60d61291f344..024af2db638e 100644 --- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi +++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi @@ -39,6 +39,13 @@ pinctrl_pck0_as_mck: pck0_as_mck {
};
+ usb1 { + pinctrl_usb1_vbus_gpio: usb1_vbus_gpio { + atmel,pins = + <AT91_PIOC 5 AT91_PERIPH_GPIO AT91_PINCTRL_DEGLITCH>; /* PC5 GPIO */ + }; + }; + mmc0_slot1 { pinctrl_board_mmc0_slot1: mmc0_slot1-board { atmel,pins = @@ -84,6 +91,8 @@ macb0: ethernet@fffc4000 { };
usb1: gadget@fffa4000 { + pinctrl-0 = <&pinctrl_usb1_vbus_gpio>; + pinctrl-names = "default"; atmel,vbus-gpio = <&pioC 5 GPIO_ACTIVE_HIGH>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6q-prti6q.dts b/arch/arm/boot/dts/imx6q-prti6q.dts index b4605edfd2ab..d8fa83effd63 100644 --- a/arch/arm/boot/dts/imx6q-prti6q.dts +++ b/arch/arm/boot/dts/imx6q-prti6q.dts @@ -364,8 +364,8 @@ wifi { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_wifi>; interrupts-extended = <&gpio1 30 IRQ_TYPE_LEVEL_HIGH>; - ref-clock-frequency = "38400000"; - tcxo-clock-frequency = "19200000"; + ref-clock-frequency = <38400000>; + tcxo-clock-frequency = <19200000>; }; };
diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c index 25c9d184fa4c..1c57ac401649 100644 --- a/arch/arm/mach-mxs/mach-mxs.c +++ b/arch/arm/mach-mxs/mach-mxs.c @@ -393,8 +393,10 @@ static void __init mxs_machine_init(void)
root = of_find_node_by_path("/"); ret = of_property_read_string(root, "model", &soc_dev_attr->machine); - if (ret) + if (ret) { + kfree(soc_dev_attr); return; + }
soc_dev_attr->family = "Freescale MXS Family"; soc_dev_attr->soc_id = mxs_get_soc_id(); diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts index 04c752f49be9..115c14c0a3c6 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts @@ -207,7 +207,7 @@ &sdmmc { cap-sd-highspeed; cd-gpios = <&gpio0 RK_PA7 GPIO_ACTIVE_LOW>; disable-wp; - max-frequency = <150000000>; + max-frequency = <40000000>; pinctrl-names = "default"; pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>; vmmc-supply = <&vcc3v3_baseboard>; diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts index a05460b92415..25a8c781f4e7 100644 --- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts +++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts @@ -740,7 +740,7 @@ &uart0 {
&uart1 { pinctrl-names = "default"; - pinctrl-0 = <&uart1m0_xfer &uart1m0_ctsn>; + pinctrl-0 = <&uart1m0_xfer &uart1m0_ctsn &uart1m0_rtsn>; status = "okay"; uart-has-rtscts;
@@ -748,13 +748,14 @@ bluetooth { compatible = "brcm,bcm43438-bt"; clocks = <&rk817 1>; clock-names = "lpo"; - device-wakeup-gpios = <&gpio2 RK_PC1 GPIO_ACTIVE_HIGH>; - host-wakeup-gpios = <&gpio2 RK_PC0 GPIO_ACTIVE_HIGH>; + host-wakeup-gpios = <&gpio2 RK_PC1 GPIO_ACTIVE_HIGH>; + device-wakeup-gpios = <&gpio2 RK_PC0 GPIO_ACTIVE_HIGH>; shutdown-gpios = <&gpio2 RK_PB7 GPIO_ACTIVE_HIGH>; pinctrl-names = "default"; pinctrl-0 = <&bt_host_wake_l &bt_wake_l &bt_enable_h>; vbat-supply = <&vcc_sys>; vddio-supply = <&vcca1v8_pmu>; + max-speed = <3000000>; }; };
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts index 528bb4e8ac77..a2d0524e0ec9 100644 --- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts +++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts @@ -176,7 +176,7 @@ rk809: pmic@20 { compatible = "rockchip,rk809"; reg = <0x20>; interrupt-parent = <&gpio0>; - interrupts = <RK_PA7 IRQ_TYPE_LEVEL_LOW>; + interrupts = <RK_PA3 IRQ_TYPE_LEVEL_LOW>; clock-output-names = "rk808-clkout1", "rk808-clkout2";
pinctrl-names = "default"; diff --git a/arch/arm64/include/asm/syscall_wrapper.h b/arch/arm64/include/asm/syscall_wrapper.h index b383b4802a7b..d30217c21eff 100644 --- a/arch/arm64/include/asm/syscall_wrapper.h +++ b/arch/arm64/include/asm/syscall_wrapper.h @@ -8,7 +8,7 @@ #ifndef __ASM_SYSCALL_WRAPPER_H #define __ASM_SYSCALL_WRAPPER_H
-struct pt_regs; +#include <asm/ptrace.h>
#define SC_ARM64_REGS_TO_ARGS(x, ...) \ __MAP(x,__SC_ARGS \ diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h index 8ea57e2f0e04..cc0674d1b8f0 100644 --- a/arch/loongarch/include/asm/pgtable.h +++ b/arch/loongarch/include/asm/pgtable.h @@ -349,7 +349,9 @@ static inline pte_t pte_mkclean(pte_t pte)
static inline pte_t pte_mkdirty(pte_t pte) { - pte_val(pte) |= (_PAGE_DIRTY | _PAGE_MODIFIED); + pte_val(pte) |= _PAGE_MODIFIED; + if (pte_val(pte) & _PAGE_WRITE) + pte_val(pte) |= _PAGE_DIRTY; return pte; }
@@ -475,7 +477,9 @@ static inline pmd_t pmd_mkclean(pmd_t pmd)
static inline pmd_t pmd_mkdirty(pmd_t pmd) { - pmd_val(pmd) |= (_PAGE_DIRTY | _PAGE_MODIFIED); + pmd_val(pmd) |= _PAGE_MODIFIED; + if (pmd_val(pmd) & _PAGE_WRITE) + pmd_val(pmd) |= _PAGE_DIRTY; return pmd; }
diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c index 660492f064e7..6ae7c669ee64 100644 --- a/arch/loongarch/kernel/process.c +++ b/arch/loongarch/kernel/process.c @@ -152,7 +152,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) childregs->csr_crmd = p->thread.csr_crmd; childregs->csr_prmd = p->thread.csr_prmd; childregs->csr_ecfg = p->thread.csr_ecfg; - return 0; + goto out; }
/* user thread */ @@ -171,14 +171,15 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) */ childregs->csr_euen = 0;
+ if (clone_flags & CLONE_SETTLS) + childregs->regs[2] = tls; + +out: clear_tsk_thread_flag(p, TIF_USEDFPU); clear_tsk_thread_flag(p, TIF_USEDSIMD); clear_tsk_thread_flag(p, TIF_LSX_CTX_LIVE); clear_tsk_thread_flag(p, TIF_LASX_CTX_LIVE);
- if (clone_flags & CLONE_SETTLS) - childregs->regs[2] = tls; - return 0; }
diff --git a/arch/mips/include/asm/fw/fw.h b/arch/mips/include/asm/fw/fw.h index d0ef8b4892bb..d0494ce4b337 100644 --- a/arch/mips/include/asm/fw/fw.h +++ b/arch/mips/include/asm/fw/fw.h @@ -26,6 +26,6 @@ extern char *fw_getcmdline(void); extern void fw_meminit(void); extern char *fw_getenv(char *name); extern unsigned long fw_getenvl(char *name); -extern void fw_init_early_console(char port); +extern void fw_init_early_console(void);
#endif /* __ASM_FW_H_ */ diff --git a/arch/mips/pic32/pic32mzda/early_console.c b/arch/mips/pic32/pic32mzda/early_console.c index 25372e62783b..3cd1b408fa1c 100644 --- a/arch/mips/pic32/pic32mzda/early_console.c +++ b/arch/mips/pic32/pic32mzda/early_console.c @@ -27,7 +27,7 @@ #define U_BRG(x) (UART_BASE(x) + 0x40)
static void __iomem *uart_base; -static char console_port = -1; +static int console_port = -1;
static int __init configure_uart_pins(int port) { @@ -47,7 +47,7 @@ static int __init configure_uart_pins(int port) return 0; }
-static void __init configure_uart(char port, int baud) +static void __init configure_uart(int port, int baud) { u32 pbclk;
@@ -60,7 +60,7 @@ static void __init configure_uart(char port, int baud) uart_base + PIC32_SET(U_STA(port))); }
-static void __init setup_early_console(char port, int baud) +static void __init setup_early_console(int port, int baud) { if (configure_uart_pins(port)) return; @@ -130,16 +130,15 @@ static int __init get_baud_from_cmdline(char *arch_cmdline) return baud; }
-void __init fw_init_early_console(char port) +void __init fw_init_early_console(void) { char *arch_cmdline = pic32_getcmdline(); - int baud = -1; + int baud, port;
uart_base = ioremap(PIC32_BASE_UART, 0xc00);
baud = get_baud_from_cmdline(arch_cmdline); - if (port == -1) - port = get_port_from_cmdline(arch_cmdline); + port = get_port_from_cmdline(arch_cmdline);
if (port == -1) port = EARLY_CONSOLE_PORT; diff --git a/arch/mips/pic32/pic32mzda/init.c b/arch/mips/pic32/pic32mzda/init.c index d9c8c4e46aff..58d8ca730df7 100644 --- a/arch/mips/pic32/pic32mzda/init.c +++ b/arch/mips/pic32/pic32mzda/init.c @@ -47,7 +47,7 @@ void __init plat_mem_setup(void) strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
#ifdef CONFIG_EARLY_PRINTK - fw_init_early_console(-1); + fw_init_early_console(); #endif pic32_config_init(); } diff --git a/arch/nios2/boot/Makefile b/arch/nios2/boot/Makefile index 8c3ad76602f3..29c11a06b750 100644 --- a/arch/nios2/boot/Makefile +++ b/arch/nios2/boot/Makefile @@ -20,7 +20,7 @@ $(obj)/vmlinux.bin: vmlinux FORCE $(obj)/vmlinux.gz: $(obj)/vmlinux.bin FORCE $(call if_changed,gzip)
-$(obj)/vmImage: $(obj)/vmlinux.gz +$(obj)/vmImage: $(obj)/vmlinux.gz FORCE $(call if_changed,uimage) @$(kecho) 'Kernel: $@ is ready'
diff --git a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts index ced0d4e47938..900a50526d77 100644 --- a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts +++ b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts @@ -3,6 +3,8 @@
#include "fu540-c000.dtsi" #include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/leds/common.h> +#include <dt-bindings/pwm/pwm.h>
/* Clock frequency (in Hz) of the PCB crystal for rtcclk */ #define RTCCLK_FREQ 1000000 @@ -42,6 +44,42 @@ gpio-restart { compatible = "gpio-restart"; gpios = <&gpio 10 GPIO_ACTIVE_LOW>; }; + + led-controller { + compatible = "pwm-leds"; + + led-d1 { + pwms = <&pwm0 0 7812500 PWM_POLARITY_INVERTED>; + active-low; + color = <LED_COLOR_ID_GREEN>; + max-brightness = <255>; + label = "d1"; + }; + + led-d2 { + pwms = <&pwm0 1 7812500 PWM_POLARITY_INVERTED>; + active-low; + color = <LED_COLOR_ID_GREEN>; + max-brightness = <255>; + label = "d2"; + }; + + led-d3 { + pwms = <&pwm0 2 7812500 PWM_POLARITY_INVERTED>; + active-low; + color = <LED_COLOR_ID_GREEN>; + max-brightness = <255>; + label = "d3"; + }; + + led-d4 { + pwms = <&pwm0 3 7812500 PWM_POLARITY_INVERTED>; + active-low; + color = <LED_COLOR_ID_GREEN>; + max-brightness = <255>; + label = "d4"; + }; + }; };
&uart0 { diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile index 84ac0fe612e7..db6548509bb3 100644 --- a/arch/riscv/kernel/vdso/Makefile +++ b/arch/riscv/kernel/vdso/Makefile @@ -28,6 +28,9 @@ obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
obj-y += vdso.o CPPFLAGS_vdso.lds += -P -C -U$(ARCH) +ifneq ($(filter vgettimeofday, $(vdso-syms)),) +CPPFLAGS_vdso.lds += -DHAS_VGETTIMEOFDAY +endif
# Disable -pg to prevent insert call site CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S index 01d94aae5bf5..150b1a572e61 100644 --- a/arch/riscv/kernel/vdso/vdso.lds.S +++ b/arch/riscv/kernel/vdso/vdso.lds.S @@ -68,9 +68,11 @@ VERSION LINUX_4.15 { global: __vdso_rt_sigreturn; +#ifdef HAS_VGETTIMEOFDAY __vdso_gettimeofday; __vdso_clock_gettime; __vdso_clock_getres; +#endif __vdso_getcpu; __vdso_flush_icache; local: *; diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 318fce77601d..de575af02ffe 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -568,8 +568,7 @@ config EXPOLINE_FULL endchoice
config RELOCATABLE - bool "Build a relocatable kernel" - default y + def_bool y help This builds a kernel image that retains relocation information so it can be loaded at an arbitrary address. @@ -578,10 +577,11 @@ config RELOCATABLE bootup process. The relocations make the kernel image about 15% larger (compressed 10%), but are discarded at runtime. + Note: this option exists only for documentation purposes, please do + not remove it.
config RANDOMIZE_BASE bool "Randomize the address of the kernel image (KASLR)" - depends on RELOCATABLE default y help In support of Kernel Address Space Layout Randomization (KASLR), diff --git a/arch/s390/Makefile b/arch/s390/Makefile index 4cb5d17e7ead..47bec926d6c0 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -14,10 +14,8 @@ KBUILD_AFLAGS_MODULE += -fPIC KBUILD_CFLAGS_MODULE += -fPIC KBUILD_AFLAGS += -m64 KBUILD_CFLAGS += -m64 -ifeq ($(CONFIG_RELOCATABLE),y) KBUILD_CFLAGS += -fPIE LDFLAGS_vmlinux := -pie -endif aflags_dwarf := -Wa,-gdwarf-2 KBUILD_AFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -D__ASSEMBLY__ ifndef CONFIG_AS_IS_LLVM diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile index 883357a211a3..d52c3e2e16bc 100644 --- a/arch/s390/boot/Makefile +++ b/arch/s390/boot/Makefile @@ -37,9 +37,8 @@ CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
obj-y := head.o als.o startup.o mem_detect.o ipl_parm.o ipl_report.o obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o -obj-y += version.o pgm_check_info.o ctype.o ipl_data.o +obj-y += version.o pgm_check_info.o ctype.o ipl_data.o machine_kexec_reloc.o obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE)) += uv.o -obj-$(CONFIG_RELOCATABLE) += machine_kexec_reloc.o obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o obj-y += $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c index bc48fe82d949..e5026e1d277f 100644 --- a/arch/s390/boot/startup.c +++ b/arch/s390/boot/startup.c @@ -285,8 +285,7 @@ void startup_kernel(void)
clear_bss_section(); copy_bootdata(); - if (IS_ENABLED(CONFIG_RELOCATABLE)) - handle_relocs(__kaslr_offset); + handle_relocs(__kaslr_offset);
if (__kaslr_offset) { /* diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c index bad8f47fc5d6..c1b2b0d4af77 100644 --- a/arch/s390/kernel/crash_dump.c +++ b/arch/s390/kernel/crash_dump.c @@ -45,7 +45,7 @@ struct save_area { u64 fprs[16]; u32 fpc; u32 prefix; - u64 todpreg; + u32 todpreg; u64 timer; u64 todcmp; u64 vxrs_low[16]; diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index 3de6d8b53367..a0165df3c4d8 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -77,7 +77,7 @@ static int hyperv_init_ghcb(void) static int hv_cpu_init(unsigned int cpu) { union hv_vp_assist_msr_contents msr = { 0 }; - struct hv_vp_assist_page **hvp = &hv_vp_assist_page[smp_processor_id()]; + struct hv_vp_assist_page **hvp = &hv_vp_assist_page[cpu]; int ret;
ret = hv_common_cpu_init(cpu); @@ -87,34 +87,32 @@ static int hv_cpu_init(unsigned int cpu) if (!hv_vp_assist_page) return 0;
- if (!*hvp) { - if (hv_root_partition) { - /* - * For root partition we get the hypervisor provided VP assist - * page, instead of allocating a new page. - */ - rdmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64); - *hvp = memremap(msr.pfn << - HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT, - PAGE_SIZE, MEMREMAP_WB); - } else { - /* - * The VP assist page is an "overlay" page (see Hyper-V TLFS's - * Section 5.2.1 "GPA Overlay Pages"). Here it must be zeroed - * out to make sure we always write the EOI MSR in - * hv_apic_eoi_write() *after* the EOI optimization is disabled - * in hv_cpu_die(), otherwise a CPU may not be stopped in the - * case of CPU offlining and the VM will hang. - */ + if (hv_root_partition) { + /* + * For root partition we get the hypervisor provided VP assist + * page, instead of allocating a new page. + */ + rdmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64); + *hvp = memremap(msr.pfn << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT, + PAGE_SIZE, MEMREMAP_WB); + } else { + /* + * The VP assist page is an "overlay" page (see Hyper-V TLFS's + * Section 5.2.1 "GPA Overlay Pages"). Here it must be zeroed + * out to make sure we always write the EOI MSR in + * hv_apic_eoi_write() *after* the EOI optimization is disabled + * in hv_cpu_die(), otherwise a CPU may not be stopped in the + * case of CPU offlining and the VM will hang. + */ + if (!*hvp) *hvp = __vmalloc(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO); - if (*hvp) - msr.pfn = vmalloc_to_pfn(*hvp); - } - WARN_ON(!(*hvp)); - if (*hvp) { - msr.enable = 1; - wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64); - } + if (*hvp) + msr.pfn = vmalloc_to_pfn(*hvp); + + } + if (!WARN_ON(!(*hvp))) { + msr.enable = 1; + wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64); }
return hyperv_init_ghcb(); diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index ef4775c6db01..dfa672bec610 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -305,6 +305,9 @@ #define X86_FEATURE_USE_IBPB_FW (11*32+16) /* "" Use IBPB during runtime firmware calls */ #define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */
+ +#define X86_FEATURE_MSR_TSX_CTRL (11*32+20) /* "" MSR IA32_TSX_CTRL (Intel) implemented */ + /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ #define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */ #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */ diff --git a/arch/x86/kernel/cpu/tsx.c b/arch/x86/kernel/cpu/tsx.c index ec7bbac3a9f2..8009c8346d8f 100644 --- a/arch/x86/kernel/cpu/tsx.c +++ b/arch/x86/kernel/cpu/tsx.c @@ -58,24 +58,6 @@ static void tsx_enable(void) wrmsrl(MSR_IA32_TSX_CTRL, tsx); }
-static bool tsx_ctrl_is_supported(void) -{ - u64 ia32_cap = x86_read_arch_cap_msr(); - - /* - * TSX is controlled via MSR_IA32_TSX_CTRL. However, support for this - * MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES. - * - * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a - * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES - * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get - * MSR_IA32_TSX_CTRL support even after a microcode update. Thus, - * tsx= cmdline requests will do nothing on CPUs without - * MSR_IA32_TSX_CTRL support. - */ - return !!(ia32_cap & ARCH_CAP_TSX_CTRL_MSR); -} - static enum tsx_ctrl_states x86_get_tsx_auto_mode(void) { if (boot_cpu_has_bug(X86_BUG_TAA)) @@ -135,7 +117,7 @@ static void tsx_clear_cpuid(void) rdmsrl(MSR_TSX_FORCE_ABORT, msr); msr |= MSR_TFA_TSX_CPUID_CLEAR; wrmsrl(MSR_TSX_FORCE_ABORT, msr); - } else if (tsx_ctrl_is_supported()) { + } else if (cpu_feature_enabled(X86_FEATURE_MSR_TSX_CTRL)) { rdmsrl(MSR_IA32_TSX_CTRL, msr); msr |= TSX_CTRL_CPUID_CLEAR; wrmsrl(MSR_IA32_TSX_CTRL, msr); @@ -158,7 +140,8 @@ static void tsx_dev_mode_disable(void) u64 mcu_opt_ctrl;
/* Check if RTM_ALLOW exists */ - if (!boot_cpu_has_bug(X86_BUG_TAA) || !tsx_ctrl_is_supported() || + if (!boot_cpu_has_bug(X86_BUG_TAA) || + !cpu_feature_enabled(X86_FEATURE_MSR_TSX_CTRL) || !cpu_feature_enabled(X86_FEATURE_SRBDS_CTRL)) return;
@@ -191,7 +174,20 @@ void __init tsx_init(void) return; }
- if (!tsx_ctrl_is_supported()) { + /* + * TSX is controlled via MSR_IA32_TSX_CTRL. However, support for this + * MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES. + * + * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a + * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES + * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get + * MSR_IA32_TSX_CTRL support even after a microcode update. Thus, + * tsx= cmdline requests will do nothing on CPUs without + * MSR_IA32_TSX_CTRL support. + */ + if (x86_read_arch_cap_msr() & ARCH_CAP_TSX_CTRL_MSR) { + setup_force_cpu_cap(X86_FEATURE_MSR_TSX_CTRL); + } else { tsx_ctrl_state = TSX_CTRL_NOT_SUPPORTED; return; } diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 83e30e4db2ae..0a163ba9fa1a 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2431,6 +2431,7 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm, { bool list_unstable, zapped_root = false;
+ lockdep_assert_held_write(&kvm->mmu_lock); trace_kvm_mmu_prepare_zap_page(sp); ++kvm->stat.mmu_shadow_zapped; *nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list); @@ -4250,14 +4251,14 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault if (is_page_fault_stale(vcpu, fault, mmu_seq)) goto out_unlock;
- r = make_mmu_pages_available(vcpu); - if (r) - goto out_unlock; - - if (is_tdp_mmu_fault) + if (is_tdp_mmu_fault) { r = kvm_tdp_mmu_map(vcpu, fault); - else + } else { + r = make_mmu_pages_available(vcpu); + if (r) + goto out_unlock; r = __direct_map(vcpu, fault); + }
out_unlock: if (is_tdp_mmu_fault) diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 76dcc8a3e849..630359bce06d 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -1143,6 +1143,9 @@ void svm_free_nested(struct vcpu_svm *svm) if (!svm->nested.initialized) return;
+ if (WARN_ON_ONCE(svm->vmcb != svm->vmcb01.ptr)) + svm_switch_vmcb(svm, &svm->vmcb01); + svm_vcpu_free_msrpm(svm->nested.msrpm); svm->nested.msrpm = NULL;
@@ -1161,9 +1164,6 @@ void svm_free_nested(struct vcpu_svm *svm) svm->nested.initialized = false; }
-/* - * Forcibly leave nested mode in order to be able to reset the VCPU later on. - */ void svm_leave_nested(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index e80756ab141b..6431019d0eff 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -346,12 +346,6 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) return 0; }
-static int is_external_interrupt(u32 info) -{ - info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID; - return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR); -} - static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -1440,6 +1434,7 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu) */ svm_clear_current_vmcb(svm->vmcb);
+ svm_leave_nested(vcpu); svm_free_nested(svm);
sev_free_vcpu(vcpu); @@ -3426,15 +3421,6 @@ static int svm_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) return 0; }
- if (is_external_interrupt(svm->vmcb->control.exit_int_info) && - exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR && - exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH && - exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI) - printk(KERN_ERR "%s: unexpected exit_int_info 0x%x " - "exit_code 0x%x\n", - __func__, svm->vmcb->control.exit_int_info, - exit_code); - if (exit_fastpath != EXIT_FASTPATH_NONE) return 1;
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 03d348fa6485..f56cc2382edf 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -6294,9 +6294,6 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu, return kvm_state.size; }
-/* - * Forcibly leave nested mode in order to be able to reset the VCPU later on. - */ void vmx_leave_nested(struct kvm_vcpu *vcpu) { if (is_guest_mode(vcpu)) { diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 71cbafd67319..5d38409913a9 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -611,6 +611,12 @@ void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_deliver_exception_payload);
+/* Forcibly leave the nested mode in cases like a vCPU reset */ +static void kvm_leave_nested(struct kvm_vcpu *vcpu) +{ + kvm_x86_ops.nested_ops->leave_nested(vcpu); +} + static void kvm_multiple_exception(struct kvm_vcpu *vcpu, unsigned nr, bool has_error, u32 error_code, bool has_payload, unsigned long payload, bool reinject) @@ -5154,7 +5160,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
if (events->flags & KVM_VCPUEVENT_VALID_SMM) { if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) { - kvm_x86_ops.nested_ops->leave_nested(vcpu); + kvm_leave_nested(vcpu); kvm_smm_changed(vcpu, events->smi.smm); }
@@ -11789,8 +11795,18 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) WARN_ON_ONCE(!init_event && (old_cr0 || kvm_read_cr3(vcpu) || kvm_read_cr4(vcpu)));
+ /* + * SVM doesn't unconditionally VM-Exit on INIT and SHUTDOWN, thus it's + * possible to INIT the vCPU while L2 is active. Force the vCPU back + * into L1 as EFER.SVME is cleared on INIT (along with all other EFER + * bits), i.e. virtualization is disabled. + */ + if (is_guest_mode(vcpu)) + kvm_leave_nested(vcpu); + kvm_lapic_reset(vcpu, init_event);
+ WARN_ON_ONCE(is_guest_mode(vcpu) || is_smm(vcpu)); vcpu->arch.hflags = 0;
vcpu->arch.smi_pending = 0; diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index 9a1950879fc4..8333b6c50e93 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -954,6 +954,14 @@ static int kvm_xen_hypercall_complete_userspace(struct kvm_vcpu *vcpu) return kvm_xen_hypercall_set_result(vcpu, run->xen.u.hcall.result); }
+static inline int max_evtchn_port(struct kvm *kvm) +{ + if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) + return EVTCHN_2L_NR_CHANNELS; + else + return COMPAT_EVTCHN_2L_NR_CHANNELS; +} + static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports, evtchn_port_t *ports) { @@ -1042,6 +1050,10 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode, *r = -EFAULT; goto out; } + if (ports[i] >= max_evtchn_port(vcpu->kvm)) { + *r = -EINVAL; + goto out; + } }
if (sched_poll.nr_ports == 1) @@ -1216,6 +1228,7 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu) bool longmode; u64 input, params[6], r = -ENOSYS; bool handled = false; + u8 cpl;
input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX);
@@ -1243,9 +1256,17 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu) params[5] = (u64)kvm_r9_read(vcpu); } #endif + cpl = static_call(kvm_x86_get_cpl)(vcpu); trace_kvm_xen_hypercall(input, params[0], params[1], params[2], params[3], params[4], params[5]);
+ /* + * Only allow hypercall acceleration for CPL0. The rare hypercalls that + * are permitted in guest userspace can be handled by the VMM. + */ + if (unlikely(cpl > 0)) + goto handle_in_userspace; + switch (input) { case __HYPERVISOR_xen_version: if (params[0] == XENVER_version && vcpu->kvm->arch.xen.xen_version) { @@ -1280,10 +1301,11 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu) if (handled) return kvm_xen_hypercall_set_result(vcpu, r);
+handle_in_userspace: vcpu->run->exit_reason = KVM_EXIT_XEN; vcpu->run->xen.type = KVM_EXIT_XEN_HCALL; vcpu->run->xen.u.hcall.longmode = longmode; - vcpu->run->xen.u.hcall.cpl = static_call(kvm_x86_get_cpl)(vcpu); + vcpu->run->xen.u.hcall.cpl = cpl; vcpu->run->xen.u.hcall.input = input; vcpu->run->xen.u.hcall.params[0] = params[0]; vcpu->run->xen.u.hcall.params[1] = params[1]; @@ -1298,14 +1320,6 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu) return 0; }
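
The net effect of the kvm_xen_hypercall() hunks above is that the guest CPL is sampled once and used both to gate in-kernel acceleration and to report the call to the VMM. A condensed sketch of the post-patch flow, using only the helpers visible in the hunk (accelerated cases elided):

    cpl = static_call(kvm_x86_get_cpl)(vcpu);

    /* Only CPL0 hypercalls are accelerated in the kernel. */
    if (unlikely(cpl > 0))
        goto handle_in_userspace;

    /* ... accelerated __HYPERVISOR_* handling elided ... */

    handle_in_userspace:
    vcpu->run->exit_reason = KVM_EXIT_XEN;
    vcpu->run->xen.u.hcall.cpl = cpl;   /* report the CPL sampled above */
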
-static inline int max_evtchn_port(struct kvm *kvm) -{ - if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) - return EVTCHN_2L_NR_CHANNELS; - else - return COMPAT_EVTCHN_2L_NR_CHANNELS; -} - static void kvm_xen_check_poller(struct kvm_vcpu *vcpu, int port) { int poll_evtchn = vcpu->arch.xen.poll_evtchn; diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 1ad0228f8ceb..19058d746695 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -216,9 +216,15 @@ __ioremap_caller(resource_size_t phys_addr, unsigned long size, * Mappings have to be page-aligned */ offset = phys_addr & ~PAGE_MASK; - phys_addr &= PHYSICAL_PAGE_MASK; + phys_addr &= PAGE_MASK; size = PAGE_ALIGN(last_addr+1) - phys_addr;
+ /* + * Mask out any bits not part of the actual physical + * address, like memory encryption bits. + */ + phys_addr &= PHYSICAL_PAGE_MASK; + retval = memtype_reserve(phys_addr, (u64)phys_addr + size, pcm, &new_pcm); if (retval) { diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 4cd39f304e20..93ae33248f42 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -513,16 +513,23 @@ static int pm_cpu_check(const struct x86_cpu_id *c)
static void pm_save_spec_msr(void) { - u32 spec_msr_id[] = { - MSR_IA32_SPEC_CTRL, - MSR_IA32_TSX_CTRL, - MSR_TSX_FORCE_ABORT, - MSR_IA32_MCU_OPT_CTRL, - MSR_AMD64_LS_CFG, - MSR_AMD64_DE_CFG, + struct msr_enumeration { + u32 msr_no; + u32 feature; + } msr_enum[] = { + { MSR_IA32_SPEC_CTRL, X86_FEATURE_MSR_SPEC_CTRL }, + { MSR_IA32_TSX_CTRL, X86_FEATURE_MSR_TSX_CTRL }, + { MSR_TSX_FORCE_ABORT, X86_FEATURE_TSX_FORCE_ABORT }, + { MSR_IA32_MCU_OPT_CTRL, X86_FEATURE_SRBDS_CTRL }, + { MSR_AMD64_LS_CFG, X86_FEATURE_LS_CFG_SSBD }, + { MSR_AMD64_DE_CFG, X86_FEATURE_LFENCE_RDTSC }, }; + int i;
- msr_build_context(spec_msr_id, ARRAY_SIZE(spec_msr_id)); + for (i = 0; i < ARRAY_SIZE(msr_enum); i++) { + if (boot_cpu_has(msr_enum[i].feature)) + msr_build_context(&msr_enum[i].msr_no, 1); + } }
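
The pm_save_spec_msr() rewrite above replaces a flat MSR list with MSR/feature pairs so that only MSRs backed by an enumerated CPU feature are saved across suspend. A minimal sketch of the pattern, assuming the same helpers as the hunk (the wrapper name save_feature_gated_msrs is illustrative, not from the patch):

    struct msr_enumeration {
        u32 msr_no;
        u32 feature;
    };

    static void save_feature_gated_msrs(const struct msr_enumeration *tbl, int count)
    {
        int i;

        for (i = 0; i < count; i++) {
            /* Skip MSRs the CPU does not actually enumerate. */
            if (boot_cpu_has(tbl[i].feature))
                msr_build_context(&tbl[i].msr_no, 1);
        }
    }
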
static int pm_check_save_msr(void) diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index 30b15a9a47c4..249f489d115f 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -615,6 +615,10 @@ struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio) struct bfq_group *bfqg;
while (blkg) { + if (!blkg->online) { + blkg = blkg->parent; + continue; + } bfqg = blkg_to_bfqg(blkg); if (bfqg->online) { bio_associate_blkg_from_css(bio, &blkg->blkcg->css); diff --git a/block/blk-mq.c b/block/blk-mq.c index 4402e4ecb8b1..3f1f5e3e0951 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -3956,9 +3956,14 @@ EXPORT_SYMBOL(__blk_mq_alloc_disk); struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q, struct lock_class_key *lkclass) { + struct gendisk *disk; + if (!blk_get_queue(q)) return NULL; - return __alloc_disk_node(q, NUMA_NO_NODE, lkclass); + disk = __alloc_disk_node(q, NUMA_NO_NODE, lkclass); + if (!disk) + blk_put_queue(q); + return disk; } EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue);
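
The blk-mq hunk above fixes a queue reference leak: the reference taken by blk_get_queue() must be dropped when disk allocation fails. For clarity, the function as it reads after the hunk is applied:

    struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
                                                struct lock_class_key *lkclass)
    {
        struct gendisk *disk;

        if (!blk_get_queue(q))
            return NULL;

        disk = __alloc_disk_node(q, NUMA_NO_NODE, lkclass);
        if (!disk)
            blk_put_queue(q);   /* drop the reference taken above */
        return disk;
    }
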
diff --git a/block/blk-settings.c b/block/blk-settings.c index 4949ed3ce7c9..8ac1038d0c79 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -59,7 +59,6 @@ void blk_set_default_limits(struct queue_limits *lim) lim->zone_write_granularity = 0; lim->dma_alignment = 511; } -EXPORT_SYMBOL(blk_set_default_limits);
/** * blk_set_stacking_limits - set default limits for stacking devices diff --git a/block/blk.h b/block/blk.h index 52432eab621e..ff0bec16f0fa 100644 --- a/block/blk.h +++ b/block/blk.h @@ -324,6 +324,7 @@ void blk_rq_set_mixed_merge(struct request *rq); bool blk_rq_merge_ok(struct request *rq, struct bio *bio); enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);
+void blk_set_default_limits(struct queue_limits *lim); int blk_dev_init(void);
/* diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 68a566f69684..aae9261c424a 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -578,6 +578,20 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"), }, }, + /* + * Models which have nvidia-ec-wmi support, but should not use it. + * Note this indicates a likely firmware bug on these models and should + * be revisited if/when Linux gets support for dynamic mux mode. + */ + { + .callback = video_detect_force_native, + /* Dell G15 5515 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5515"), + }, + }, + /* * Desktops which falsely report a backlight and which our heuristics * for this do not catch. diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 9b1778c00610..64999777e0bf 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -760,6 +760,12 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, const char *failure_string; struct binder_buffer *buffer;
+ if (unlikely(vma->vm_mm != alloc->vma_vm_mm)) { + ret = -EINVAL; + failure_string = "invalid vma->vm_mm"; + goto err_invalid_mm; + } + mutex_lock(&binder_alloc_mmap_lock); if (alloc->buffer_size) { ret = -EBUSY; @@ -806,6 +812,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, alloc->buffer_size = 0; err_already_mapped: mutex_unlock(&binder_alloc_mmap_lock); +err_invalid_mm: binder_alloc_debug(BINDER_DEBUG_USER_ERROR, "%s: %d %lx-%lx %s failed %d\n", __func__, alloc->pid, vma->vm_start, vma->vm_end, diff --git a/drivers/bus/intel-ixp4xx-eb.c b/drivers/bus/intel-ixp4xx-eb.c index a4388440aca7..91db001eb69a 100644 --- a/drivers/bus/intel-ixp4xx-eb.c +++ b/drivers/bus/intel-ixp4xx-eb.c @@ -49,7 +49,7 @@ #define IXP4XX_EXP_SIZE_SHIFT 10 #define IXP4XX_EXP_CNFG_0 BIT(9) /* Always zero */ #define IXP43X_EXP_SYNC_INTEL BIT(8) /* Only on IXP43x */ -#define IXP43X_EXP_EXP_CHIP BIT(7) /* Only on IXP43x */ +#define IXP43X_EXP_EXP_CHIP BIT(7) /* Only on IXP43x, dangerous to touch on IXP42x */ #define IXP4XX_EXP_BYTE_RD16 BIT(6) #define IXP4XX_EXP_HRDY_POL BIT(5) /* Only on IXP42x */ #define IXP4XX_EXP_MUX_EN BIT(4) @@ -57,8 +57,6 @@ #define IXP4XX_EXP_WORD BIT(2) /* Always zero */ #define IXP4XX_EXP_WR_EN BIT(1) #define IXP4XX_EXP_BYTE_EN BIT(0) -#define IXP42X_RESERVED (BIT(30)|IXP4XX_EXP_CNFG_0|BIT(8)|BIT(7)|IXP4XX_EXP_WORD) -#define IXP43X_RESERVED (BIT(30)|IXP4XX_EXP_CNFG_0|BIT(5)|IXP4XX_EXP_WORD)
#define IXP4XX_EXP_CNFG0 0x20 #define IXP4XX_EXP_CNFG0_MEM_MAP BIT(31) @@ -252,10 +250,9 @@ static void ixp4xx_exp_setup_chipselect(struct ixp4xx_eb *eb, cs_cfg |= val << IXP4XX_EXP_CYC_TYPE_SHIFT; }
- if (eb->is_42x) - cs_cfg &= ~IXP42X_RESERVED; if (eb->is_43x) { - cs_cfg &= ~IXP43X_RESERVED; + /* Should always be zero */ + cs_cfg &= ~IXP4XX_EXP_WORD; /* * This bit for Intel strata flash is currently unused, but let's * report it if we find one. diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c index 4cd2e127946e..3aa91aed3bf7 100644 --- a/drivers/bus/sunxi-rsb.c +++ b/drivers/bus/sunxi-rsb.c @@ -267,6 +267,9 @@ EXPORT_SYMBOL_GPL(sunxi_rsb_driver_register); /* common code that starts a transfer */ static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb) { + u32 int_mask, status; + bool timeout; + if (readl(rsb->regs + RSB_CTRL) & RSB_CTRL_START_TRANS) { dev_dbg(rsb->dev, "RSB transfer still in progress\n"); return -EBUSY; @@ -274,13 +277,23 @@ static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb)
reinit_completion(&rsb->complete);
- writel(RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR | RSB_INTS_TRANS_OVER, - rsb->regs + RSB_INTE); + int_mask = RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR | RSB_INTS_TRANS_OVER; + writel(int_mask, rsb->regs + RSB_INTE); writel(RSB_CTRL_START_TRANS | RSB_CTRL_GLOBAL_INT_ENB, rsb->regs + RSB_CTRL);
- if (!wait_for_completion_io_timeout(&rsb->complete, - msecs_to_jiffies(100))) { + if (irqs_disabled()) { + timeout = readl_poll_timeout_atomic(rsb->regs + RSB_INTS, + status, (status & int_mask), + 10, 100000); + writel(status, rsb->regs + RSB_INTS); + } else { + timeout = !wait_for_completion_io_timeout(&rsb->complete, + msecs_to_jiffies(100)); + status = rsb->status; + } + + if (timeout) { dev_dbg(rsb->dev, "RSB timeout\n");
/* abort the transfer */ @@ -292,18 +305,18 @@ static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb) return -ETIMEDOUT; }
- if (rsb->status & RSB_INTS_LOAD_BSY) { + if (status & RSB_INTS_LOAD_BSY) { dev_dbg(rsb->dev, "RSB busy\n"); return -EBUSY; }
- if (rsb->status & RSB_INTS_TRANS_ERR) { - if (rsb->status & RSB_INTS_TRANS_ERR_ACK) { + if (status & RSB_INTS_TRANS_ERR) { + if (status & RSB_INTS_TRANS_ERR_ACK) { dev_dbg(rsb->dev, "RSB slave nack\n"); return -EINVAL; }
- if (rsb->status & RSB_INTS_TRANS_ERR_DATA) { + if (status & RSB_INTS_TRANS_ERR_DATA) { dev_dbg(rsb->dev, "RSB transfer data error\n"); return -EIO; } @@ -812,14 +825,6 @@ static int sunxi_rsb_remove(struct platform_device *pdev) return 0; }
-static void sunxi_rsb_shutdown(struct platform_device *pdev) -{ - struct sunxi_rsb *rsb = platform_get_drvdata(pdev); - - pm_runtime_disable(&pdev->dev); - sunxi_rsb_hw_exit(rsb); -} - static const struct dev_pm_ops sunxi_rsb_dev_pm_ops = { SET_RUNTIME_PM_OPS(sunxi_rsb_runtime_suspend, sunxi_rsb_runtime_resume, NULL) @@ -835,7 +840,6 @@ MODULE_DEVICE_TABLE(of, sunxi_rsb_of_match_table); static struct platform_driver sunxi_rsb_driver = { .probe = sunxi_rsb_probe, .remove = sunxi_rsb_remove, - .shutdown = sunxi_rsb_shutdown, .driver = { .name = RSB_CTRL_NAME, .of_match_table = sunxi_rsb_of_match_table, diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86 index 55516043b656..8184378f67ef 100644 --- a/drivers/cpufreq/Kconfig.x86 +++ b/drivers/cpufreq/Kconfig.x86 @@ -35,7 +35,7 @@ config X86_PCC_CPUFREQ If in doubt, say N.
config X86_AMD_PSTATE - tristate "AMD Processor P-State driver" + bool "AMD Processor P-State driver" depends on X86 && ACPI select ACPI_PROCESSOR select ACPI_CPPC_LIB if X86_64 diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index d63a28c5f95a..5b788492ce5e 100644 --- a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -483,12 +483,22 @@ static void amd_pstate_boost_init(struct amd_cpudata *cpudata) amd_pstate_driver.boost_enabled = true; }
+static void amd_perf_ctl_reset(unsigned int cpu) +{ + wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0); +} + static int amd_pstate_cpu_init(struct cpufreq_policy *policy) { int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret; struct device *dev; struct amd_cpudata *cpudata;
+ /* + * Resetting PERF_CTL_MSR will put the CPU in P0 frequency, + * which is ideal for initialization process. + */ + amd_perf_ctl_reset(policy->cpu); dev = get_cpu_device(policy->cpu); if (!dev) return -ENODEV; @@ -718,16 +728,7 @@ static int __init amd_pstate_init(void)
return ret; } - -static void __exit amd_pstate_exit(void) -{ - cpufreq_unregister_driver(&amd_pstate_driver); - - amd_pstate_enable(false); -} - -module_init(amd_pstate_init); -module_exit(amd_pstate_exit); +device_initcall(amd_pstate_init);
MODULE_AUTHOR("Huang Rui ray.huang@amd.com"); MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver"); diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index efb4990b29e1..c6f08066e165 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -15,6 +15,7 @@ #include <linux/slab.h> #include <linux/dma-buf.h> #include <linux/dma-fence.h> +#include <linux/dma-fence-unwrap.h> #include <linux/anon_inodes.h> #include <linux/export.h> #include <linux/debugfs.h> @@ -391,8 +392,10 @@ static long dma_buf_import_sync_file(struct dma_buf *dmabuf, const void __user *user_data) { struct dma_buf_import_sync_file arg; - struct dma_fence *fence; + struct dma_fence *fence, *f; enum dma_resv_usage usage; + struct dma_fence_unwrap iter; + unsigned int num_fences; int ret = 0;
if (copy_from_user(&arg, user_data, sizeof(arg))) @@ -411,13 +414,21 @@ static long dma_buf_import_sync_file(struct dma_buf *dmabuf, usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ;
- dma_resv_lock(dmabuf->resv, NULL); + num_fences = 0; + dma_fence_unwrap_for_each(f, &iter, fence) + ++num_fences;
- ret = dma_resv_reserve_fences(dmabuf->resv, 1); - if (!ret) - dma_resv_add_fence(dmabuf->resv, fence, usage); + if (num_fences > 0) { + dma_resv_lock(dmabuf->resv, NULL);
- dma_resv_unlock(dmabuf->resv); + ret = dma_resv_reserve_fences(dmabuf->resv, num_fences); + if (!ret) { + dma_fence_unwrap_for_each(f, &iter, fence) + dma_resv_add_fence(dmabuf->resv, f, usage); + } + + dma_resv_unlock(dmabuf->resv); + }
dma_fence_put(fence);
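
The dma-buf change above stops adding a possibly composite sync-file fence as a single entry: the fence is unwrapped, the component fences are counted, and only then are that many reservation slots reserved and the components added one by one. The core of the post-patch logic reads:

    num_fences = 0;
    dma_fence_unwrap_for_each(f, &iter, fence)
        ++num_fences;

    if (num_fences > 0) {
        dma_resv_lock(dmabuf->resv, NULL);

        ret = dma_resv_reserve_fences(dmabuf->resv, num_fences);
        if (!ret)
            dma_fence_unwrap_for_each(f, &iter, fence)
                dma_resv_add_fence(dmabuf->resv, f, usage);

        dma_resv_unlock(dmabuf->resv);
    }

    dma_fence_put(fence);
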
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c index 8f5848aa144f..59d158873f4c 100644 --- a/drivers/dma-buf/dma-heap.c +++ b/drivers/dma-buf/dma-heap.c @@ -233,18 +233,6 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info) return ERR_PTR(-EINVAL); }
- /* check the name is unique */ - mutex_lock(&heap_list_lock); - list_for_each_entry(h, &heap_list, list) { - if (!strcmp(h->name, exp_info->name)) { - mutex_unlock(&heap_list_lock); - pr_err("dma_heap: Already registered heap named %s\n", - exp_info->name); - return ERR_PTR(-EINVAL); - } - } - mutex_unlock(&heap_list_lock); - heap = kzalloc(sizeof(*heap), GFP_KERNEL); if (!heap) return ERR_PTR(-ENOMEM); @@ -283,13 +271,27 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info) err_ret = ERR_CAST(dev_ret); goto err2; } - /* Add heap to the list */ + mutex_lock(&heap_list_lock); + /* check the name is unique */ + list_for_each_entry(h, &heap_list, list) { + if (!strcmp(h->name, exp_info->name)) { + mutex_unlock(&heap_list_lock); + pr_err("dma_heap: Already registered heap named %s\n", + exp_info->name); + err_ret = ERR_PTR(-EINVAL); + goto err3; + } + } + + /* Add heap to the list */ list_add(&heap->list, &heap_list); mutex_unlock(&heap_list_lock);
return heap;
+err3: + device_destroy(dma_heap_class, heap->heap_devt); err2: cdev_del(&heap->heap_cdev); err1: diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig index 6c416955da53..bbe0a7cabb75 100644 --- a/drivers/fpga/Kconfig +++ b/drivers/fpga/Kconfig @@ -246,7 +246,9 @@ config FPGA_MGR_VERSAL_FPGA
config FPGA_M10_BMC_SEC_UPDATE tristate "Intel MAX10 BMC Secure Update driver" - depends on MFD_INTEL_M10_BMC && FW_UPLOAD + depends on MFD_INTEL_M10_BMC + select FW_LOADER + select FW_UPLOAD help Secure update support for the Intel MAX10 board management controller. diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c index c8935d718207..4485bb29bec9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c @@ -41,5 +41,6 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = { .get_atc_vmid_pasid_mapping_info = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base, + .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy, .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 93ad00453f4b..7db4aef9c45c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -170,9 +170,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, (kfd_mem_limit.ttm_mem_used + ttm_mem_needed > kfd_mem_limit.max_ttm_mem_limit) || (adev && adev->kfd.vram_used + vram_needed > - adev->gmc.real_vram_size - - atomic64_read(&adev->vram_pin_size) - - reserved_for_pt)) { + adev->gmc.real_vram_size - reserved_for_pt)) { ret = -ENOMEM; goto release; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 491d4846fc02..cfb262911bfc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -328,7 +328,6 @@ static void amdgpu_connector_free_edid(struct drm_connector *connector)
kfree(amdgpu_connector->edid); amdgpu_connector->edid = NULL; - drm_connector_update_edid_property(connector, NULL); }
static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 8ef31d687ef3..111484ceb47d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -413,11 +413,9 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, if (r) goto release_object;
- if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) { - r = amdgpu_mn_register(bo, args->addr); - if (r) - goto release_object; - } + r = amdgpu_mn_register(bo, args->addr); + if (r) + goto release_object;
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) { r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index aebc384531ac..bff5d8c832ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -479,6 +479,12 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev) unsigned i; unsigned vmhub, inv_eng;
+ if (adev->enable_mes) { + /* reserve engine 5 for firmware */ + for (vmhub = 0; vmhub < AMDGPU_MAX_VMHUBS; vmhub++) + vm_inv_engs[vmhub] &= ~(1 << 5); + } + for (i = 0; i < adev->num_rings; ++i) { ring = adev->rings[i]; vmhub = ring->funcs->vmhub; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index c9dec2434f37..cfa45a697d24 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -171,6 +171,7 @@ void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx) { amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr, &mem_ctx->shared_buf); + mem_ctx->shared_bo = NULL; }
static void psp_free_shared_bufs(struct psp_context *psp) @@ -181,6 +182,7 @@ static void psp_free_shared_bufs(struct psp_context *psp) /* free TMR memory buffer */ pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr); + psp->tmr_bo = NULL;
/* free xgmi shared memory */ psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context); @@ -728,7 +730,7 @@ static int psp_load_toc(struct psp_context *psp, /* Set up Trusted Memory Region */ static int psp_tmr_init(struct psp_context *psp) { - int ret; + int ret = 0; int tmr_size; void *tmr_buf; void **pptr; @@ -755,10 +757,12 @@ static int psp_tmr_init(struct psp_context *psp) } }
- pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; - ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_ALIGNMENT, - AMDGPU_GEM_DOMAIN_VRAM, - &psp->tmr_bo, &psp->tmr_mc_addr, pptr); + if (!psp->tmr_bo) { + pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; + ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_ALIGNMENT, + AMDGPU_GEM_DOMAIN_VRAM, + &psp->tmr_bo, &psp->tmr_mc_addr, pptr); + }
return ret; } @@ -2720,8 +2724,6 @@ static int psp_suspend(void *handle) }
out: - psp_free_shared_bufs(psp); - return ret; }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 04130f8813ef..369c0d03e3c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -143,32 +143,6 @@ int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm, return 0; }
-/* - * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS - * happens while holding this lock anywhere to prevent deadlocks when - * an MMU notifier runs in reclaim-FS context. - */ -static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm) -{ - mutex_lock(&vm->eviction_lock); - vm->saved_flags = memalloc_noreclaim_save(); -} - -static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm) -{ - if (mutex_trylock(&vm->eviction_lock)) { - vm->saved_flags = memalloc_noreclaim_save(); - return 1; - } - return 0; -} - -static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm) -{ - memalloc_noreclaim_restore(vm->saved_flags); - mutex_unlock(&vm->eviction_lock); -} - /** * amdgpu_vm_bo_evicted - vm_bo is evicted * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 278512535b51..39d2898caede 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -503,4 +503,30 @@ static inline uint64_t amdgpu_vm_tlb_seq(struct amdgpu_vm *vm) return atomic64_read(&vm->tlb_seq); }
+/* + * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS + * happens while holding this lock anywhere to prevent deadlocks when + * an MMU notifier runs in reclaim-FS context. + */ +static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm) +{ + mutex_lock(&vm->eviction_lock); + vm->saved_flags = memalloc_noreclaim_save(); +} + +static inline bool amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm) +{ + if (mutex_trylock(&vm->eviction_lock)) { + vm->saved_flags = memalloc_noreclaim_save(); + return true; + } + return false; +} + +static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm) +{ + memalloc_noreclaim_restore(vm->saved_flags); + mutex_unlock(&vm->eviction_lock); +} + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index 88de9f0d4728..983899574464 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -597,7 +597,9 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev, if (entry->bo) return 0;
+ amdgpu_vm_eviction_unlock(vm); r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt); + amdgpu_vm_eviction_lock(vm); if (r) return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 3bff0ae15e64..3175b9c1849d 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -190,7 +190,11 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes, mes_add_queue_pkt.trap_handler_addr = input->tba_addr; mes_add_queue_pkt.tma_addr = input->tma_addr; mes_add_queue_pkt.is_kfd_process = input->is_kfd_process; - mes_add_queue_pkt.trap_en = 1; + + if (!(((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 4) && + (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) && + (adev->ip_versions[GC_HWIP][0] <= IP_VERSION(11, 0, 3)))) + mes_add_queue_pkt.trap_en = 1;
/* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */ mes_add_queue_pkt.is_aql_queue = input->is_aql_queue; diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h index 60a81649cf12..0c4c5499bb5c 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h @@ -742,7 +742,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = { 0xbf88fffe, 0x877aff7f, 0x04000000, 0x8f7a857a, 0x886d7a6d, 0xb97b02dc, - 0x8f7b997b, 0xb97a2a05, + 0x8f7b997b, 0xb97a3a05, 0x807a817a, 0xbf0d997b, 0xbf850002, 0x8f7a897a, 0xbf820001, 0x8f7a8a7a, @@ -819,7 +819,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = { 0xbefe037c, 0xbefc0370, 0xf4611c7a, 0xf8000000, 0x80708470, 0xbefc037e, - 0xb9702a05, 0x80708170, + 0xb9703a05, 0x80708170, 0xbf0d9973, 0xbf850002, 0x8f708970, 0xbf820001, 0x8f708a70, 0xb97a1e06, @@ -1069,7 +1069,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = { 0xb9f9f816, 0x876f7bff, 0xfffff800, 0x906f8b6f, 0xb9efa2c3, 0xb9f3f801, - 0xb96e2a05, 0x806e816e, + 0xb96e3a05, 0x806e816e, 0xbf0d9972, 0xbf850002, 0x8f6e896e, 0xbf820001, 0x8f6e8a6e, 0xb96f1e06, @@ -2114,7 +2114,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0x007a0000, 0x7e000280, 0xbefe037a, 0xbeff037b, 0xb97b02dc, 0x8f7b997b, - 0xb97a2a05, 0x807a817a, + 0xb97a3a05, 0x807a817a, 0xbf0d997b, 0xbf850002, 0x8f7a897a, 0xbf820001, 0x8f7a8a7a, 0xb97b1e06, @@ -2157,7 +2157,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0x01000000, 0xe0704100, 0x705d0100, 0xe0704200, 0x705d0200, 0xe0704300, - 0x705d0300, 0xb9702a05, + 0x705d0300, 0xb9703a05, 0x80708170, 0xbf0d9973, 0xbf850002, 0x8f708970, 0xbf820001, 0x8f708a70, @@ -2189,7 +2189,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0xbefe03ff, 0x0000ffff, 0xbeff0380, 0xe0704000, 0x705d0200, 0xbefe03c1, - 0xb9702a05, 0x80708170, + 0xb9703a05, 0x80708170, 0xbf0d9973, 0xbf850002, 0x8f708970, 0xbf820001, 0x8f708a70, 0xb97a1e06, @@ -2475,7 +2475,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0xb9ef4803, 0x876f7bff, 0xfffff800, 0x906f8b6f, 0xb9efa2c3, 0xb9f3f801, - 0xb96e2a05, 0x806e816e, + 0xb96e3a05, 0x806e816e, 0xbf0d9972, 0xbf850002, 0x8f6e896e, 0xbf820001, 0x8f6e8a6e, 0xb96f1e06, @@ -2494,11 +2494,13 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0x00000000, }; - static const uint32_t cwsr_trap_gfx11_hex[] = { - 0xbfa00001, 0xbfa0021b, + 0xbfa00001, 0xbfa00221, 0xb0804006, 0xb8f8f802, - 0x91788678, 0xb8fbf803, + 0x9178ff78, 0x00020006, + 0xb8fbf803, 0xbf0d9e6d, + 0xbfa10001, 0xbfbd0000, + 0xbf0d9f6d, 0xbfa20006, 0x8b6eff78, 0x00002000, 0xbfa10009, 0x8b6eff6d, 0x00ff0000, 0xbfa2001e, @@ -2766,7 +2768,7 @@ static const uint32_t cwsr_trap_gfx11_hex[] = { 0x701d0000, 0x807d817d, 0x8070ff70, 0x00000080, 0xbf0a7b7d, 0xbfa2fff8, - 0xbfa00141, 0xbef4007e, + 0xbfa00146, 0xbef4007e, 0x8b75ff7f, 0x0000ffff, 0x8c75ff75, 0x00040000, 0xbef60080, 0xbef700ff, @@ -2926,8 +2928,11 @@ static const uint32_t cwsr_trap_gfx11_hex[] = { 0xf8000074, 0xbf89fc07, 0x8b6dff6d, 0x0000ffff, 0x8bfe7e7e, 0x8bea6a6a, - 0xb97af802, 0xbe804a6c, - 0xbfb00000, 0xbf9f0000, + 0xb8eef802, 0xbf0d866e, + 0xbfa20002, 0xb97af802, + 0xbe80486c, 0xb97af802, + 0xbe804a6c, 0xbfb00000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, + 0xbf9f0000, 0x00000000, }; diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm index 250ab007399b..8b92c33c2a7c 100644 --- 
a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm @@ -43,12 +43,14 @@ #define HAVE_XNACK (ASIC_FAMILY < CHIP_SIENNA_CICHLID) #define HAVE_SENDMSG_RTN (ASIC_FAMILY >= CHIP_PLUM_BONITO) #define HAVE_BUFFER_LDS_LOAD (ASIC_FAMILY < CHIP_PLUM_BONITO) +#define SW_SA_TRAP (ASIC_FAMILY >= CHIP_PLUM_BONITO)
var SINGLE_STEP_MISSED_WORKAROUND = 1 //workaround for lost MODE.DEBUG_EN exception when SAVECTX raised
var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006 var SQ_WAVE_STATUS_HALT_MASK = 0x2000 var SQ_WAVE_STATUS_ECC_ERR_MASK = 0x20000 +var SQ_WAVE_STATUS_TRAP_EN_SHIFT = 6
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12 var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9 @@ -183,6 +185,19 @@ L_SKIP_RESTORE:
s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+#if SW_SA_TRAP + // If ttmp1[30] is set then issue s_barrier to unblock dependent waves. + s_bitcmp1_b32 s_save_pc_hi, 30 + s_cbranch_scc0 L_TRAP_NO_BARRIER + s_barrier + +L_TRAP_NO_BARRIER: + // If ttmp1[31] is set then trap may occur early. + // Spin wait until SAVECTX exception is raised. + s_bitcmp1_b32 s_save_pc_hi, 31 + s_cbranch_scc1 L_CHECK_SAVE +#endif + s_and_b32 ttmp2, s_save_status, SQ_WAVE_STATUS_HALT_MASK s_cbranch_scc0 L_NOT_HALTED
@@ -1061,8 +1076,20 @@ L_RESTORE_HWREG: s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32 s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32 + +#if SW_SA_TRAP + // If traps are enabled then return to the shader with PRIV=0. + // Otherwise retain PRIV=1 for subsequent context save requests. + s_getreg_b32 s_restore_tmp, hwreg(HW_REG_STATUS) + s_bitcmp1_b32 s_restore_tmp, SQ_WAVE_STATUS_TRAP_EN_SHIFT + s_cbranch_scc1 L_RETURN_WITHOUT_PRIV + s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu + s_setpc_b64 [s_restore_pc_lo, s_restore_pc_hi] +L_RETURN_WITHOUT_PRIV: +#endif
+ s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
L_END_PGM: diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 7f8eb09b0b7c..4b16d9d1e058 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1371,7 +1371,44 @@ static const struct dmi_system_id hpd_disconnect_quirk_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"), }, }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"), + }, + }, {} + /* TODO: refactor this from a fixed table to a dynamic option */ };
static void retrieve_dmi_info(struct amdgpu_display_manager *dm) @@ -7650,9 +7687,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->surface_updates[planes_count].plane_info = &bundle->plane_infos[planes_count];
- fill_dc_dirty_rects(plane, old_plane_state, new_plane_state, - new_crtc_state, - &bundle->flip_addrs[planes_count]); + if (acrtc_state->stream->link->psr_settings.psr_feature_enabled) + fill_dc_dirty_rects(plane, old_plane_state, + new_plane_state, new_crtc_state, + &bundle->flip_addrs[planes_count]);
/* * Only allow immediate flips for fast updates that don't diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index dbf8158b832e..fcddf60d3c10 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -746,6 +746,9 @@ struct dc_debug_options { bool force_disable_subvp; bool force_subvp_mclk_switch; bool allow_sw_cursor_fallback; + unsigned int force_subvp_num_ways; + unsigned int force_mall_ss_num_ways; + bool alloc_extra_way_for_cursor; bool force_usr_allow; /* uses value at boot and disables switch */ bool disable_dtb_ref_clk_switch; diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index 1b70b78e2fa1..af631085e88c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -359,7 +359,8 @@ static const struct dce_audio_registers audio_regs[] = { audio_regs(2), audio_regs(3), audio_regs(4), - audio_regs(5) + audio_regs(5), + audio_regs(6), };
#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c index c72166e096ba..bbc0bfbec6c4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c @@ -49,6 +49,7 @@ #include "dcn20/dcn20_optc.h" #include "dmub_subvp_state.h" #include "dce/dmub_hw_lock_mgr.h" +#include "dcn32_resource.h" #include "dc_link_dp.h" #include "dmub/inc/dmub_subvp_state.h"
@@ -198,42 +199,6 @@ static bool dcn32_check_no_memory_request_for_cab(struct dc *dc) return false; }
-/* This function takes in the start address and surface size to be cached in CAB - * and calculates the total number of cache lines required to store the surface. - * The number of cache lines used for each surface is calculated independently of - * one another. For example, if there is a primary surface(1), meta surface(2), and - * cursor(3), this function should be called 3 times to calculate the number of cache - * lines used for each of those surfaces. - */ -static uint32_t dcn32_cache_lines_for_surface(struct dc *dc, uint32_t surface_size, uint64_t start_address) -{ - uint32_t lines_used = 1; - uint32_t num_cached_bytes = 0; - uint32_t remaining_size = 0; - uint32_t cache_line_size = dc->caps.cache_line_size; - uint32_t remainder = 0; - - /* 1. Calculate surface size minus the number of bytes stored - * in the first cache line (all bytes in first cache line might - * not be fully used). - */ - div_u64_rem(start_address, cache_line_size, &remainder); - num_cached_bytes = cache_line_size - remainder; - remaining_size = surface_size - num_cached_bytes; - - /* 2. Calculate number of cache lines that will be fully used with - * the remaining number of bytes to be stored. - */ - lines_used += (remaining_size / cache_line_size); - - /* 3. Check if we need an extra line due to the remaining size not being - * a multiple of CACHE_LINE_SIZE. - */ - if (remaining_size % cache_line_size > 0) - lines_used++; - - return lines_used; -}
/* This function loops through every surface that needs to be cached in CAB for SS, * and calculates the total number of ways required to store all surfaces (primary, @@ -241,94 +206,115 @@ static uint32_t dcn32_cache_lines_for_surface(struct dc *dc, uint32_t surface_si */ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx) { - uint8_t i, j; + uint8_t i; + int j; struct dc_stream_state *stream = NULL; struct dc_plane_state *plane = NULL; - uint32_t surface_size = 0; uint32_t cursor_size = 0; - uint32_t cache_lines_used = 0; uint32_t total_lines = 0; uint32_t lines_per_way = 0; - uint32_t num_ways = 0; - uint32_t prev_addr_low = 0; + uint8_t num_ways = 0; + uint8_t bytes_per_pixel = 0; + uint8_t cursor_bpp = 0; + uint16_t mblk_width = 0; + uint16_t mblk_height = 0; + uint16_t mall_alloc_width_blk_aligned = 0; + uint16_t mall_alloc_height_blk_aligned = 0; + uint16_t num_mblks = 0; + uint32_t bytes_in_mall = 0; + uint32_t cache_lines_used = 0; + uint32_t cache_lines_per_plane = 0;
- for (i = 0; i < ctx->stream_count; i++) { - stream = ctx->streams[i]; + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- // Don't include PSR surface in the total surface size for CAB allocation - if (stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) + if (!pipe->stream || !pipe->plane_state || + pipe->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED || + pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) continue;
- if (ctx->stream_status[i].plane_count == 0) - continue; + bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4; + mblk_width = DCN3_2_MBLK_WIDTH; + mblk_height = bytes_per_pixel == 4 ? DCN3_2_MBLK_HEIGHT_4BPE : DCN3_2_MBLK_HEIGHT_8BPE;
- // For each stream, loop through each plane to calculate the number of cache - // lines required to store the surface in CAB - for (j = 0; j < ctx->stream_status[i].plane_count; j++) { - plane = ctx->stream_status[i].plane_states[j]; + /* full_vp_width_blk_aligned = FLOOR(vp_x_start + full_vp_width + blk_width - 1, blk_width) - + * FLOOR(vp_x_start, blk_width) + * + * mall_alloc_width_blk_aligned_l/c = full_vp_width_blk_aligned_l/c + */ + mall_alloc_width_blk_aligned = ((pipe->plane_res.scl_data.viewport.x + + pipe->plane_res.scl_data.viewport.width + mblk_width - 1) / mblk_width * mblk_width) + + (pipe->plane_res.scl_data.viewport.x / mblk_width * mblk_width); + + /* full_vp_height_blk_aligned = FLOOR(vp_y_start + full_vp_height + blk_height - 1, blk_height) - + * FLOOR(vp_y_start, blk_height) + * + * mall_alloc_height_blk_aligned_l/c = full_vp_height_blk_aligned_l/c + */ + mall_alloc_height_blk_aligned = ((pipe->plane_res.scl_data.viewport.y + + pipe->plane_res.scl_data.viewport.height + mblk_height - 1) / mblk_height * mblk_height) + + (pipe->plane_res.scl_data.viewport.y / mblk_height * mblk_height);
- // Calculate total surface size - if (prev_addr_low != plane->address.grph.addr.u.low_part) { - /* if plane address are different from prev FB, then userspace allocated separate FBs*/ - surface_size += plane->plane_size.surface_pitch * - plane->plane_size.surface_size.height * - (plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4); + num_mblks = ((mall_alloc_width_blk_aligned + mblk_width - 1) / mblk_width) * + ((mall_alloc_height_blk_aligned + mblk_height - 1) / mblk_height);
- prev_addr_low = plane->address.grph.addr.u.low_part; - } else { - /* We have the same fb for all the planes. - * Xorg always creates one giant fb that holds all surfaces, - * so allocating it once is sufficient. - * */ - continue; - } - // Convert surface size + starting address to number of cache lines required - // (alignment accounted for) - cache_lines_used += dcn32_cache_lines_for_surface(dc, surface_size, - plane->address.grph.addr.quad_part); - - if (plane->address.grph.meta_addr.quad_part) { - // Meta surface - cache_lines_used += dcn32_cache_lines_for_surface(dc, surface_size, - plane->address.grph.meta_addr.quad_part); - } - } + /* For DCC: + * meta_num_mblk = CEILING(full_mblk_width_ub_l*full_mblk_height_ub_l*Bpe/256/mblk_bytes, 1) + */ + if (pipe->plane_state->dcc.enable) + num_mblks += (mall_alloc_width_blk_aligned * mall_alloc_width_blk_aligned * bytes_per_pixel + + (256 * DCN3_2_MALL_MBLK_SIZE_BYTES) - 1) / (256 * DCN3_2_MALL_MBLK_SIZE_BYTES);
- // Include cursor size for CAB allocation - for (j = 0; j < dc->res_pool->pipe_count; j++) { - struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[j]; - struct hubp *hubp = pipe->plane_res.hubp; + bytes_in_mall = num_mblks * DCN3_2_MALL_MBLK_SIZE_BYTES;
- if (pipe->stream && pipe->plane_state && hubp) - /* Find the cursor plane and use the exact size instead of - * using the max for calculation - */ - if (hubp->curs_attr.width > 0) { - cursor_size = hubp->curs_attr.width * hubp->curs_attr.height; - break; - } - } + /* (cache lines used is total bytes / cache_line size. Add +2 for worst case alignment + * (MALL is 64-byte aligned) + */ + cache_lines_per_plane = bytes_in_mall / dc->caps.cache_line_size + 2; + cache_lines_used += cache_lines_per_plane; + }
- switch (stream->cursor_attributes.color_format) { - case CURSOR_MODE_MONO: - cursor_size /= 2; - break; - case CURSOR_MODE_COLOR_1BIT_AND: - case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA: - case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA: - cursor_size *= 4; - break; + // Include cursor size for CAB allocation + for (j = 0; j < dc->res_pool->pipe_count; j++) { + struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[j]; + struct hubp *hubp = pipe->plane_res.hubp;
- case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED: - case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED: - cursor_size *= 8; - break; - } + if (pipe->stream && pipe->plane_state && hubp) + /* Find the cursor plane and use the exact size instead of + using the max for calculation */
- if (stream->cursor_position.enable && plane->address.grph.cursor_cache_addr.quad_part) { - cache_lines_used += dcn32_cache_lines_for_surface(dc, cursor_size, - plane->address.grph.cursor_cache_addr.quad_part); - } + if (hubp->curs_attr.width > 0) { + cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height; + + switch (pipe->stream->cursor_attributes.color_format) { + case CURSOR_MODE_MONO: + cursor_size /= 2; + cursor_bpp = 4; + break; + case CURSOR_MODE_COLOR_1BIT_AND: + case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA: + case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA: + cursor_size *= 4; + cursor_bpp = 4; + break; + + case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED: + case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED: + cursor_size *= 8; + cursor_bpp = 8; + break; + } + + if (pipe->stream->cursor_position.enable && !dc->debug.alloc_extra_way_for_cursor && + cursor_size > 16384) { + /* cursor_num_mblk = CEILING(num_cursors*cursor_width*cursor_width*cursor_Bpe/mblk_bytes, 1) + */ + cache_lines_used += (((cursor_size + DCN3_2_MALL_MBLK_SIZE_BYTES - 1) / + DCN3_2_MALL_MBLK_SIZE_BYTES) * DCN3_2_MALL_MBLK_SIZE_BYTES) / + dc->caps.cache_line_size + 2; + } + break; + } }
// Convert number of cache lines required to number of ways @@ -345,8 +331,8 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c plane = ctx->stream_status[i].plane_states[j];
if (stream->cursor_position.enable && plane && - !plane->address.grph.cursor_cache_addr.quad_part && - cursor_size > 16384) { + dc->debug.alloc_extra_way_for_cursor && + cursor_size > 16384) { /* Cursor caching is not supported since it won't be on the same line. * So we need an extra line to accommodate it. With large cursors and a single 4k monitor * this case triggers corruption. If we're at the edge, then dont trigger display refresh @@ -358,7 +344,9 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c } } } - + if (dc->debug.force_mall_ss_num_ways > 0) { + num_ways = dc->debug.force_mall_ss_num_ways; + } return num_ways; }
@@ -741,10 +729,7 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context) struct hubp *hubp = pipe->plane_res.hubp;
if (pipe->stream && pipe->plane_state && hubp && hubp->funcs->hubp_update_mall_sel) { - //Round cursor width up to next multiple of 64 - int cursor_width = ((hubp->curs_attr.width + 63) / 64) * 64; - int cursor_height = hubp->curs_attr.height; - int cursor_size = cursor_width * cursor_height; + int cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;
switch (hubp->curs_attr.color_format) { case CURSOR_MODE_MONO: diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index c3b783cea8a0..6f1bcb45a3b2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -872,6 +872,7 @@ static const struct dc_debug_options debug_defaults_drv = { .enable_single_display_2to1_odm_policy = true, .enable_dp_dig_pixel_rate_div_policy = 1, .allow_sw_cursor_fallback = false, + .alloc_extra_way_for_cursor = true, };
static const struct dc_debug_options debug_defaults_diags = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c index 13cd1f2e50ca..7c37575d69c7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c @@ -54,13 +54,14 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_stat uint32_t num_mblks = 0; uint32_t cache_lines_per_plane = 0; uint32_t i = 0, j = 0; - uint32_t mblk_width = 0; - uint32_t mblk_height = 0; + uint16_t mblk_width = 0; + uint16_t mblk_height = 0; uint32_t full_vp_width_blk_aligned = 0; uint32_t full_vp_height_blk_aligned = 0; uint32_t mall_alloc_width_blk_aligned = 0; uint32_t mall_alloc_height_blk_aligned = 0; - uint32_t full_vp_height = 0; + uint16_t full_vp_height = 0; + bool subvp_in_use = false;
for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; @@ -70,6 +71,7 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_stat pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { struct pipe_ctx *main_pipe = NULL;
+ subvp_in_use = true; /* Get full viewport height from main pipe (required for MBLK calculation) */ for (j = 0; j < dc->res_pool->pipe_count; j++) { main_pipe = &context->res_ctx.pipe_ctx[j]; @@ -129,6 +131,9 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_stat if (cache_lines_used % lines_per_way > 0) num_ways++;
+ if (subvp_in_use && dc->debug.force_subvp_num_ways > 0) + num_ways = dc->debug.force_subvp_num_ways; + return num_ways; }
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c index 7309eed33a61..d074716dc197 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c @@ -873,6 +873,7 @@ static const struct dc_debug_options debug_defaults_drv = { .enable_single_display_2to1_odm_policy = true, .enable_dp_dig_pixel_rate_div_policy = 1, .allow_sw_cursor_fallback = false, + .alloc_extra_way_for_cursor = true, };
static const struct dc_debug_options debug_defaults_diags = { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index b9d3a4000c3d..2f996fdaa70d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -157,7 +157,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = { .dispclk_dppclk_vco_speed_mhz = 4300.0, .do_urgent_latency_adjustment = true, .urgent_latency_adjustment_fabric_clock_component_us = 1.0, - .urgent_latency_adjustment_fabric_clock_reference_mhz = 1000, + .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000, };
void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr) @@ -211,7 +211,7 @@ void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr) /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */ if (clk_mgr->base.ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) { clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].valid = true; - clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = 38; + clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = 50; clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.fclk_change_latency_us = fclk_change_latency_us; clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us = sr_exit_time_us; clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us; @@ -221,7 +221,7 @@ void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr) clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_uclk = min_uclk_mhz; clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_uclk = 0xFFFF; clk_mgr->base.bw_params->dummy_pstate_table[0].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[0].memclk_mhz * 16; - clk_mgr->base.bw_params->dummy_pstate_table[0].dummy_pstate_latency_us = 38; + clk_mgr->base.bw_params->dummy_pstate_table[0].dummy_pstate_latency_us = 50; clk_mgr->base.bw_params->dummy_pstate_table[1].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[1].memclk_mhz * 16; clk_mgr->base.bw_params->dummy_pstate_table[1].dummy_pstate_latency_us = 9; clk_mgr->base.bw_params->dummy_pstate_table[2].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[2].memclk_mhz * 16; @@ -1700,6 +1700,12 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, */ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; + /* For DCN32/321 need to validate with fclk pstate change latency equal to dummy so + * prefetch is scheduled correctly to account for dummy pstate. + */ + if (dummy_latency_index == 0) + context->bw_ctx.dml.soc.fclk_change_latency_us = + dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false); maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; @@ -1879,6 +1885,10 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && dummy_latency_index == 0) + context->bw_ctx.dml.soc.fclk_change_latency_us = + dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; + dcn32_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
if (!pstate_en) @@ -1886,8 +1896,12 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
- if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { dcn30_setup_mclk_switch_using_fw_based_vblank_stretch(dc, context); + if (dummy_latency_index == 0) + context->bw_ctx.dml.soc.fclk_change_latency_us = + dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us; + } }
static void dcn32_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c index bea380407151..042f9a62c4c5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c @@ -3197,6 +3197,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.FCLKChangeLatency, mode_lib->vba.UrgLatency[i], mode_lib->vba.SREnterPlusExitTime);
+ memset(&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull, 0, sizeof(DmlPipe)); v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.Dppclk = mode_lib->vba.RequiredDPPCLK[i][j][k]; v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.Dispclk = mode_lib->vba.RequiredDISPCLK[i][j]; v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.PixelClock = mode_lib->vba.PixelClock[k]; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c index 67af8f4df8b8..d9141ef2fefd 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c @@ -4396,7 +4396,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
if (v->NumberOfActiveSurfaces > 1) { ActiveClockChangeLatencyHidingY = ActiveClockChangeLatencyHidingY - - (1 - 1 / v->NumberOfActiveSurfaces) * SwathHeightY[k] * v->HTotal[k] + - (1.0 - 1.0 / v->NumberOfActiveSurfaces) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k]; }
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h index 0b427d89b3c5..f174f5c5ff92 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h @@ -30,7 +30,7 @@ #include "os_types.h" #include "../dc_features.h" #include "../display_mode_structs.h" -#include "dml/display_mode_vba.h" +#include "../display_mode_vba.h"
unsigned int dml32_dscceComputeDelay( unsigned int bpc, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c index aa976fe4d426..0ab8e48b6841 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c @@ -125,9 +125,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = { .sr_enter_plus_exit_z8_time_us = 320, .writeback_latency_us = 12.0, .round_trip_ping_latency_dcfclk_cycles = 263, - .urgent_latency_pixel_data_only_us = 9.35, - .urgent_latency_pixel_mixed_with_vm_data_us = 9.35, - .urgent_latency_vm_data_only_us = 9.35, + .urgent_latency_pixel_data_only_us = 4, + .urgent_latency_pixel_mixed_with_vm_data_us = 4, + .urgent_latency_vm_data_only_us = 4, .fclk_change_latency_us = 20, .usr_retraining_latency_us = 2, .smn_latency_us = 2, @@ -155,7 +155,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = { .dispclk_dppclk_vco_speed_mhz = 4300.0, .do_urgent_latency_adjustment = true, .urgent_latency_adjustment_fabric_clock_component_us = 1.0, - .urgent_latency_adjustment_fabric_clock_reference_mhz = 1000, + .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000, };
static void get_optimal_ntuple(struct _vcs_dpi_voltage_scaling_st *entry) diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c index d635b73af46f..0ea52ba5ac82 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c @@ -107,6 +107,13 @@ static const struct ddc_registers ddc_data_regs_dcn[] = { ddc_data_regs_dcn2(3), ddc_data_regs_dcn2(4), ddc_data_regs_dcn2(5), + { + // add a dummy entry for cases no such port + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,}, + .ddc_setup = 0, + .phy_aux_cntl = 0, + .dc_gpio_aux_ctrl_5 = 0 + }, { DDC_GPIO_VGA_REG_LIST(DATA), .ddc_setup = 0, @@ -121,6 +128,13 @@ static const struct ddc_registers ddc_clk_regs_dcn[] = { ddc_clk_regs_dcn2(3), ddc_clk_regs_dcn2(4), ddc_clk_regs_dcn2(5), + { + // add a dummy entry for cases no such port + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,}, + .ddc_setup = 0, + .phy_aux_cntl = 0, + .dc_gpio_aux_ctrl_5 = 0 + }, { DDC_GPIO_VGA_REG_LIST(CLK), .ddc_setup = 0, diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c index 6fd38cdd68c0..525bc8881950 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c @@ -94,11 +94,14 @@ static enum gpio_result set_config( * is required for detection of AUX mode */ if (hw_gpio->base.en != GPIO_DDC_LINE_VIP_PAD) { if (!ddc_data_pd_en || !ddc_clk_pd_en) { - - REG_SET_2(gpio.MASK_reg, regval, + if (hw_gpio->base.en == GPIO_DDC_LINE_DDC_VGA) { + // bit 4 of mask has different usage in some cases + REG_SET(gpio.MASK_reg, regval, DC_GPIO_DDC1DATA_PD_EN, 1); + } else { + REG_SET_2(gpio.MASK_reg, regval, DC_GPIO_DDC1DATA_PD_EN, 1, DC_GPIO_DDC1CLK_PD_EN, 1); - + } if (config_data->type == GPIO_CONFIG_TYPE_I2C_AUX_DUAL_MODE) msleep(3); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c index dad3e3741a4e..190af79f3236 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c @@ -67,22 +67,21 @@ int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, int vega10_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr, uint32_t *speed) { - uint32_t current_rpm; - uint32_t percent = 0; - - if (hwmgr->thermal_controller.fanInfo.bNoFan) - return 0; + struct amdgpu_device *adev = hwmgr->adev; + uint32_t duty100, duty; + uint64_t tmp64;
- if (vega10_get_current_rpm(hwmgr, &current_rpm)) - return -1; + duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1), + CG_FDO_CTRL1, FMAX_DUTY100); + duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS), + CG_THERMAL_STATUS, FDO_PWM_DUTY);
- if (hwmgr->thermal_controller. - advanceFanControlParameters.usMaxFanRPM != 0) - percent = current_rpm * 255 / - hwmgr->thermal_controller. - advanceFanControlParameters.usMaxFanRPM; + if (!duty100) + return -EINVAL;
- *speed = MIN(percent, 255); + tmp64 = (uint64_t)duty * 255; + do_div(tmp64, duty100); + *speed = MIN((uint32_t)tmp64, 255);
return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 8292839bc42a..9ce0dcc5bb90 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -378,6 +378,10 @@ static void sienna_cichlid_check_bxco_support(struct smu_context *smu) ((adev->pdev->device == 0x73BF) && (adev->pdev->revision == 0xCF)) || ((adev->pdev->device == 0x7422) && + (adev->pdev->revision == 0x00)) || + ((adev->pdev->device == 0x73A3) && + (adev->pdev->revision == 0x00)) || + ((adev->pdev->device == 0x73E3) && (adev->pdev->revision == 0x00))) smu_baco->platform_support = false;
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index 7a94a5288e8d..855297e69f04 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -5293,7 +5293,7 @@ int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm mst_state = drm_atomic_get_mst_topology_state(state, mgr);
if (IS_ERR(mst_state)) - return -EINVAL; + return PTR_ERR(mst_state);
list_for_each_entry(pos, &mst_state->vcpis, next) {
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 8a0c0e0bb5bd..52d8800a8ab8 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -134,6 +134,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"), }, .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* Acer Switch V 10 (SW5-017) */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SW5-017"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* Anbernic Win600 */ .matches = { DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Anbernic"), @@ -319,6 +325,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"), }, .driver_data = (void *)&lcd1200x1920_rightside_up, + }, { /* Nanote UMPC-01 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "RWC CO.,LTD"), + DMI_MATCH(DMI_PRODUCT_NAME, "UMPC-01"), + }, + .driver_data = (void *)&lcd1200x1920_rightside_up, }, { /* OneGX1 Pro */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SYSTEM_MANUFACTURER"), diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 589af257edeb..3bb113b42cfa 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -2427,7 +2427,7 @@ intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port) { const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
- if (drm_WARN_ON(&i915->drm, !domains) || domains->ddi_io == POWER_DOMAIN_INVALID) + if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID)) return POWER_DOMAIN_PORT_DDI_IO_A;
return domains->ddi_io + (int)(port - domains->port_start); @@ -2438,7 +2438,7 @@ intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port po { const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
- if (drm_WARN_ON(&i915->drm, !domains) || domains->ddi_lanes == POWER_DOMAIN_INVALID) + if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID)) return POWER_DOMAIN_PORT_DDI_LANES_A;
return domains->ddi_lanes + (int)(port - domains->port_start); @@ -2464,7 +2464,7 @@ intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch { const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
- if (drm_WARN_ON(&i915->drm, !domains) || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID) + if (drm_WARN_ON(&i915->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID)) return POWER_DOMAIN_AUX_A;
return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start); @@ -2475,7 +2475,7 @@ intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch au { const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
- if (drm_WARN_ON(&i915->drm, !domains) || domains->aux_tbt == POWER_DOMAIN_INVALID) + if (drm_WARN_ON(&i915->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID)) return POWER_DOMAIN_AUX_TBT1;
return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index e85cfc36359a..f5a803060515 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -642,6 +642,10 @@ static int i915_ttm_truncate(struct drm_i915_gem_object *obj)
WARN_ON_ONCE(obj->mm.madv == I915_MADV_WILLNEED);
+ err = ttm_bo_wait(bo, true, false); + if (err) + return err; + err = i915_ttm_move_notify(bo); if (err) return err; diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index f435e06125aa..f158f6a08e75 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -961,6 +961,11 @@ static void mmio_invalidate_full(struct intel_gt *gt) if (!i915_mmio_reg_offset(rb.reg)) continue;
+ if (GRAPHICS_VER(i915) == 12 && (engine->class == VIDEO_DECODE_CLASS || + engine->class == VIDEO_ENHANCEMENT_CLASS || + engine->class == COMPUTE_CLASS)) + rb.bit = _MASKED_BIT_ENABLE(rb.bit); + intel_uncore_write_fw(uncore, rb.reg, rb.bit); awake |= engine->mask; } diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index de89946c4817..f671ae5a3b7b 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -765,8 +765,6 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev) return -ESRCH; }
- kvm_get_kvm(vgpu->vfio_device.kvm); - if (__kvmgt_vgpu_exist(vgpu)) return -EEXIST;
@@ -777,6 +775,7 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
vgpu->track_node.track_write = kvmgt_page_track_write; vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot; + kvm_get_kvm(vgpu->vfio_device.kvm); kvm_page_track_register_notifier(vgpu->vfio_device.kvm, &vgpu->track_node);
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 6748ec1e0005..a1f909dac89a 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -1093,6 +1093,10 @@ static bool host1x_drm_wants_iommu(struct host1x_device *dev) struct host1x *host1x = dev_get_drvdata(dev->dev.parent); struct iommu_domain *domain;
+ /* Our IOMMU usage policy doesn't currently play well with GART */ + if (of_machine_is_compatible("nvidia,tegra20")) + return false; + /* * If the Tegra DRM clients are backed by an IOMMU, push buffers are * likely to be allocated beyond the 32-bit boundary if sufficient diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index 0cd3f97e7e49..f60ea24db0ec 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -292,6 +292,10 @@ static void host1x_setup_virtualization_tables(struct host1x *host)
static bool host1x_wants_iommu(struct host1x *host1x) { + /* Our IOMMU usage policy doesn't currently play well with GART */ + if (of_machine_is_compatible("nvidia,tegra20")) + return false; + /* * If we support addressing a maximum of 32 bits of physical memory * and if the host1x firewall is enabled, there's no need to enable diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 5b120402d405..cc23b90cae02 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -533,13 +533,17 @@ static void vmbus_add_channel_work(struct work_struct *work) * Add the new device to the bus. This will kick off device-driver * binding which eventually invokes the device driver's AddDevice() * method. + * + * If vmbus_device_register() fails, the 'device_obj' is freed in + * vmbus_device_release() as called by device_unregister() in the + * error path of vmbus_device_register(). In the outside error + * path, there's no need to free it. */ ret = vmbus_device_register(newchannel->device_obj);
if (ret != 0) { pr_err("unable to add child device object (relid %d)\n", newchannel->offermsg.child_relid); - kfree(newchannel->device_obj); goto err_deq_chan; }
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 3c833ea60db6..939ccf921e71 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -2083,6 +2083,7 @@ int vmbus_device_register(struct hv_device *child_device_obj) ret = device_register(&child_device_obj->device); if (ret) { pr_err("Unable to register child device\n"); + put_device(&child_device_obj->device); return ret; }
diff --git a/drivers/iio/accel/bma400_core.c b/drivers/iio/accel/bma400_core.c index 29c9fa99c2bd..dea438b9eb9e 100644 --- a/drivers/iio/accel/bma400_core.c +++ b/drivers/iio/accel/bma400_core.c @@ -673,8 +673,10 @@ static int bma400_get_steps_reg(struct bma400_data *data, int *val)
ret = regmap_bulk_read(data->regmap, BMA400_STEP_CNT0_REG, steps_raw, BMA400_STEP_RAW_LEN); - if (ret) + if (ret) { + kfree(steps_raw); return ret; + } *val = get_unaligned_le24(steps_raw); kfree(steps_raw); return IIO_VAL_INT; diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c index 9341e0e0eb55..998e8bcc06e1 100644 --- a/drivers/iio/adc/aspeed_adc.c +++ b/drivers/iio/adc/aspeed_adc.c @@ -202,6 +202,8 @@ static int aspeed_adc_set_trim_data(struct iio_dev *indio_dev) ((scu_otp) & (data->model_data->trim_locate->field)) >> __ffs(data->model_data->trim_locate->field); + if (!trimming_val) + trimming_val = 0x8; } dev_dbg(data->dev, "trimming val = %d, offset = %08x, fields = %08x\n", @@ -563,12 +565,9 @@ static int aspeed_adc_probe(struct platform_device *pdev) if (ret) return ret;
- if (of_find_property(data->dev->of_node, "aspeed,trim-data-valid", - NULL)) { - ret = aspeed_adc_set_trim_data(indio_dev); - if (ret) - return ret; - } + ret = aspeed_adc_set_trim_data(indio_dev); + if (ret) + return ret;
if (of_find_property(data->dev->of_node, "aspeed,battery-sensing", NULL)) { diff --git a/drivers/iio/industrialio-sw-trigger.c b/drivers/iio/industrialio-sw-trigger.c index 994f03a71520..d86a3305d9e8 100644 --- a/drivers/iio/industrialio-sw-trigger.c +++ b/drivers/iio/industrialio-sw-trigger.c @@ -58,8 +58,12 @@ int iio_register_sw_trigger_type(struct iio_sw_trigger_type *t)
t->group = configfs_register_default_group(iio_triggers_group, t->name, &iio_trigger_type_group_type); - if (IS_ERR(t->group)) + if (IS_ERR(t->group)) { + mutex_lock(&iio_trigger_types_lock); + list_del(&t->list); + mutex_unlock(&iio_trigger_types_lock); ret = PTR_ERR(t->group); + }
return ret; } diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c index 09b831f9f40b..795224a38bef 100644 --- a/drivers/iio/light/apds9960.c +++ b/drivers/iio/light/apds9960.c @@ -54,9 +54,6 @@ #define APDS9960_REG_CONTROL_PGAIN_MASK_SHIFT 2
#define APDS9960_REG_CONFIG_2 0x90 -#define APDS9960_REG_CONFIG_2_GGAIN_MASK 0x60 -#define APDS9960_REG_CONFIG_2_GGAIN_MASK_SHIFT 5 - #define APDS9960_REG_ID 0x92
#define APDS9960_REG_STATUS 0x93 @@ -77,6 +74,9 @@ #define APDS9960_REG_GCONF_1_GFIFO_THRES_MASK_SHIFT 6
#define APDS9960_REG_GCONF_2 0xa3 +#define APDS9960_REG_GCONF_2_GGAIN_MASK 0x60 +#define APDS9960_REG_GCONF_2_GGAIN_MASK_SHIFT 5 + #define APDS9960_REG_GOFFSET_U 0xa4 #define APDS9960_REG_GOFFSET_D 0xa5 #define APDS9960_REG_GPULSE 0xa6 @@ -396,9 +396,9 @@ static int apds9960_set_pxs_gain(struct apds9960_data *data, int val) }
ret = regmap_update_bits(data->regmap, - APDS9960_REG_CONFIG_2, - APDS9960_REG_CONFIG_2_GGAIN_MASK, - idx << APDS9960_REG_CONFIG_2_GGAIN_MASK_SHIFT); + APDS9960_REG_GCONF_2, + APDS9960_REG_GCONF_2_GGAIN_MASK, + idx << APDS9960_REG_GCONF_2_GGAIN_MASK_SHIFT); if (!ret) data->pxs_gain = idx; mutex_unlock(&data->lock); diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c index 480476121c01..09489380afda 100644 --- a/drivers/input/misc/soc_button_array.c +++ b/drivers/input/misc/soc_button_array.c @@ -18,6 +18,10 @@ #include <linux/gpio.h> #include <linux/platform_device.h>
+static bool use_low_level_irq; +module_param(use_low_level_irq, bool, 0444); +MODULE_PARM_DESC(use_low_level_irq, "Use low-level triggered IRQ instead of edge triggered"); + struct soc_button_info { const char *name; int acpi_index; @@ -73,6 +77,13 @@ static const struct dmi_system_id dmi_use_low_level_irq[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"), }, }, + { + /* Acer Switch V 10 SW5-017, same issue as Acer Switch 10 SW5-012. */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "SW5-017"), + }, + }, { /* * Acer One S1003. _LID method messes with power-button GPIO @@ -164,7 +175,8 @@ soc_button_device_create(struct platform_device *pdev, }
/* See dmi_use_low_level_irq[] comment */ - if (!autorepeat && dmi_check_system(dmi_use_low_level_irq)) { + if (!autorepeat && (use_low_level_irq || + dmi_check_system(dmi_use_low_level_irq))) { irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW); gpio_keys[n_buttons].irq = irq; gpio_keys[n_buttons].gpio = -ENOENT; diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index ffad142801b3..973a4c1d5d09 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -191,6 +191,7 @@ static const char * const smbus_pnp_ids[] = { "SYN3221", /* HP 15-ay000 */ "SYN323d", /* HP Spectre X360 13-w013dx */ "SYN3257", /* HP Envy 13-ad105ng */ + "SYN3286", /* HP Laptop 15-da3001TU */ NULL };
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 4fbec7bbecca..5043dc7b8fb3 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -114,18 +114,18 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_NEVER) }, { - /* ASUS ZenBook UX425UA */ + /* ASUS ZenBook UX425UA/QA */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425UA"), + DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425"), }, .driver_data = (void *)(SERIO_QUIRK_PROBE_DEFER | SERIO_QUIRK_RESET_NEVER) }, { - /* ASUS ZenBook UM325UA */ + /* ASUS ZenBook UM325UA/QA */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"), + DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325"), }, .driver_data = (void *)(SERIO_QUIRK_PROBE_DEFER | SERIO_QUIRK_RESET_NEVER) }, diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index 21c0dddbe41d..25e6ba132bbc 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -1158,6 +1158,7 @@ static int goodix_configure_dev(struct goodix_ts_data *ts) input_set_abs_params(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0, 255, 0, 0); input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+retry_read_config: /* Read configuration and apply touchscreen parameters */ goodix_read_config(ts);
@@ -1165,6 +1166,16 @@ static int goodix_configure_dev(struct goodix_ts_data *ts) touchscreen_parse_properties(ts->input_dev, true, &ts->prop);
if (!ts->prop.max_x || !ts->prop.max_y || !ts->max_touch_num) { + if (!ts->reset_controller_at_probe && + ts->irq_pin_access_method != IRQ_PIN_ACCESS_NONE) { + dev_info(&ts->client->dev, "Config not set, resetting controller\n"); + /* Retry after a controller reset */ + ts->reset_controller_at_probe = true; + error = goodix_reset(ts); + if (error) + return error; + goto retry_read_config; + } dev_err(&ts->client->dev, "Invalid config (%d, %d, %d), using defaults\n", ts->prop.max_x, ts->prop.max_y, ts->max_touch_num); diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index aaf2472df6e5..e97e9f97456d 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -263,6 +263,7 @@ struct dm_integrity_c {
struct completion crypto_backoff;
+ bool wrote_to_journal; bool journal_uptodate; bool just_formatted; bool recalculate_flag; @@ -2375,6 +2376,8 @@ static void integrity_commit(struct work_struct *w) if (!commit_sections) goto release_flush_bios;
+ ic->wrote_to_journal = true; + i = commit_start; for (n = 0; n < commit_sections; n++) { for (j = 0; j < ic->journal_section_entries; j++) { @@ -2591,10 +2594,6 @@ static void integrity_writer(struct work_struct *w)
unsigned prev_free_sectors;
- /* the following test is not needed, but it tests the replay code */ - if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev) - return; - spin_lock_irq(&ic->endio_wait.lock); write_start = ic->committed_section; write_sections = ic->n_committed_sections; @@ -3101,10 +3100,17 @@ static void dm_integrity_postsuspend(struct dm_target *ti) drain_workqueue(ic->commit_wq);
if (ic->mode == 'J') { - if (ic->meta_dev) - queue_work(ic->writer_wq, &ic->writer_work); + queue_work(ic->writer_wq, &ic->writer_work); drain_workqueue(ic->writer_wq); dm_integrity_flush_buffers(ic, true); + if (ic->wrote_to_journal) { + init_journal(ic, ic->free_section, + ic->journal_sections - ic->free_section, ic->commit_seq); + if (ic->free_section) { + init_journal(ic, 0, ic->free_section, + next_commit_seq(ic->commit_seq)); + } + } }
if (ic->mode == 'B') { @@ -3132,6 +3138,8 @@ static void dm_integrity_resume(struct dm_target *ti)
DEBUG_print("resume\n");
+ ic->wrote_to_journal = false; + if (ic->provided_data_sectors != old_provided_data_sectors) { if (ic->provided_data_sectors > old_provided_data_sectors && ic->mode == 'B' && @@ -3370,6 +3378,7 @@ static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *lim limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT; limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT; blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT); + limits->dma_alignment = limits->logical_block_size - 1; } }
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c index 20fd688f72e7..178e13a5b059 100644 --- a/drivers/md/dm-log-writes.c +++ b/drivers/md/dm-log-writes.c @@ -875,6 +875,7 @@ static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limit limits->logical_block_size = bdev_logical_block_size(lc->dev->bdev); limits->physical_block_size = bdev_physical_block_size(lc->dev->bdev); limits->io_min = limits->physical_block_size; + limits->dma_alignment = limits->logical_block_size - 1; }
#if IS_ENABLED(CONFIG_FS_DAX) diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c index 24150c933fcb..dc3253b318da 100644 --- a/drivers/net/arcnet/com20020_cs.c +++ b/drivers/net/arcnet/com20020_cs.c @@ -113,6 +113,7 @@ static int com20020_probe(struct pcmcia_device *p_dev) struct com20020_dev *info; struct net_device *dev; struct arcnet_local *lp; + int ret = -ENOMEM;
dev_dbg(&p_dev->dev, "com20020_attach()\n");
@@ -142,12 +143,18 @@ static int com20020_probe(struct pcmcia_device *p_dev) info->dev = dev; p_dev->priv = info;
- return com20020_config(p_dev); + ret = com20020_config(p_dev); + if (ret) + goto fail_config; + + return 0;
+fail_config: + free_arcdev(dev); fail_alloc_dev: kfree(info); fail_alloc_info: - return -ENOMEM; + return ret; } /* com20020_attach */
static void com20020_detach(struct pcmcia_device *link) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 86d42306aa5e..76dd5ff1d99d 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3231,16 +3231,23 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave) { struct slave *curr_active_slave, *curr_arp_slave; - struct icmp6hdr *hdr = icmp6_hdr(skb); struct in6_addr *saddr, *daddr; + struct { + struct ipv6hdr ip6; + struct icmp6hdr icmp6; + } *combined, _combined;
if (skb->pkt_type == PACKET_OTHERHOST || - skb->pkt_type == PACKET_LOOPBACK || - hdr->icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT) + skb->pkt_type == PACKET_LOOPBACK) + goto out; + + combined = skb_header_pointer(skb, 0, sizeof(_combined), &_combined); + if (!combined || combined->ip6.nexthdr != NEXTHDR_ICMP || + combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT) goto out;
- saddr = &ipv6_hdr(skb)->saddr; - daddr = &ipv6_hdr(skb)->daddr; + saddr = &combined->ip6.saddr; + daddr = &combined->ip6.daddr;
slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI6c tip %pI6c\n", __func__, slave->dev->name, bond_slave_state(slave), diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index cd4115a1b81c..0deac073d9cf 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -268,8 +268,6 @@ struct gs_can {
struct usb_anchor tx_submitted; atomic_t active_tx_urbs; - void *rxbuf[GS_MAX_RX_URBS]; - dma_addr_t rxbuf_dma[GS_MAX_RX_URBS]; };
/* usb interface struct */ @@ -587,9 +585,6 @@ static void gs_usb_xmit_callback(struct urb *urb)
if (urb->status) netdev_info(netdev, "usb xmit fail %u\n", txc->echo_id); - - usb_free_coherent(urb->dev, urb->transfer_buffer_length, - urb->transfer_buffer, urb->transfer_dma); }
static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, @@ -618,8 +613,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, if (!urb) goto nomem_urb;
- hf = usb_alloc_coherent(dev->udev, dev->hf_size_tx, GFP_ATOMIC, - &urb->transfer_dma); + hf = kmalloc(dev->hf_size_tx, GFP_ATOMIC); if (!hf) { netdev_err(netdev, "No memory left for USB buffer\n"); goto nomem_hf; @@ -663,7 +657,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, hf, dev->hf_size_tx, gs_usb_xmit_callback, txc);
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + urb->transfer_flags |= URB_FREE_BUFFER; usb_anchor_urb(urb, &dev->tx_submitted);
can_put_echo_skb(skb, netdev, idx, 0); @@ -678,8 +672,6 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, gs_free_tx_context(txc);
usb_unanchor_urb(urb); - usb_free_coherent(dev->udev, urb->transfer_buffer_length, - urb->transfer_buffer, urb->transfer_dma);
if (rc == -ENODEV) { netif_device_detach(netdev); @@ -699,8 +691,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK;
badidx: - usb_free_coherent(dev->udev, urb->transfer_buffer_length, - urb->transfer_buffer, urb->transfer_dma); + kfree(hf); nomem_hf: usb_free_urb(urb);
@@ -744,7 +735,6 @@ static int gs_can_open(struct net_device *netdev) for (i = 0; i < GS_MAX_RX_URBS; i++) { struct urb *urb; u8 *buf; - dma_addr_t buf_dma;
/* alloc rx urb */ urb = usb_alloc_urb(0, GFP_KERNEL); @@ -752,10 +742,8 @@ static int gs_can_open(struct net_device *netdev) return -ENOMEM;
/* alloc rx buffer */ - buf = usb_alloc_coherent(dev->udev, - dev->parent->hf_size_rx, - GFP_KERNEL, - &buf_dma); + buf = kmalloc(dev->parent->hf_size_rx, + GFP_KERNEL); if (!buf) { netdev_err(netdev, "No memory left for USB buffer\n"); @@ -763,8 +751,6 @@ static int gs_can_open(struct net_device *netdev) return -ENOMEM; }
- urb->transfer_dma = buf_dma; - /* fill, anchor, and submit rx urb */ usb_fill_bulk_urb(urb, dev->udev, @@ -773,7 +759,7 @@ static int gs_can_open(struct net_device *netdev) buf, dev->parent->hf_size_rx, gs_usb_receive_bulk_callback, parent); - urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + urb->transfer_flags |= URB_FREE_BUFFER;
usb_anchor_urb(urb, &parent->rx_submitted);
@@ -786,17 +772,10 @@ static int gs_can_open(struct net_device *netdev) "usb_submit failed (err=%d)\n", rc);
usb_unanchor_urb(urb); - usb_free_coherent(dev->udev, - sizeof(struct gs_host_frame), - buf, - buf_dma); usb_free_urb(urb); break; }
- dev->rxbuf[i] = buf; - dev->rxbuf_dma[i] = buf_dma; - /* Drop reference, * USB core will take care of freeing it */ @@ -854,7 +833,6 @@ static int gs_can_close(struct net_device *netdev) int rc; struct gs_can *dev = netdev_priv(netdev); struct gs_usb *parent = dev->parent; - unsigned int i;
netif_stop_queue(netdev);
@@ -862,11 +840,6 @@ static int gs_can_close(struct net_device *netdev) parent->active_channels--; if (!parent->active_channels) { usb_kill_anchored_urbs(&parent->rx_submitted); - for (i = 0; i < GS_MAX_RX_URBS; i++) - usb_free_coherent(dev->udev, - sizeof(struct gs_host_frame), - dev->rxbuf[i], - dev->rxbuf_dma[i]); }
/* Stop sending URBs */ diff --git a/drivers/net/dsa/sja1105/sja1105_mdio.c b/drivers/net/dsa/sja1105/sja1105_mdio.c index 215dd17ca790..4059fcc8c832 100644 --- a/drivers/net/dsa/sja1105/sja1105_mdio.c +++ b/drivers/net/dsa/sja1105/sja1105_mdio.c @@ -256,6 +256,9 @@ static int sja1105_base_tx_mdio_read(struct mii_bus *bus, int phy, int reg) u32 tmp; int rc;
+ if (reg & MII_ADDR_C45) + return -EOPNOTSUPP; + rc = sja1105_xfer_u32(priv, SPI_READ, regs->mdio_100base_tx + reg, &tmp, NULL); if (rc < 0) @@ -272,6 +275,9 @@ static int sja1105_base_tx_mdio_write(struct mii_bus *bus, int phy, int reg, const struct sja1105_regs *regs = priv->info->regs; u32 tmp = val;
+ if (reg & MII_ADDR_C45) + return -EOPNOTSUPP; + return sja1105_xfer_u32(priv, SPI_WRITE, regs->mdio_100base_tx + reg, &tmp, NULL); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 11d15cd03600..77d4cb4ad782 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -795,16 +795,20 @@ static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid) { - struct pci_dev *dev; struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); + struct pci_dev *dev; + bool pending;
if (!vf) return false;
dev = pci_get_domain_bus_and_slot(vf->domain, vf->bus, vf->devfn); - if (dev) - return bnx2x_is_pcie_pending(dev); - return false; + if (!dev) + return false; + pending = bnx2x_is_pcie_pending(dev); + pci_dev_put(dev); + + return pending; }
int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index bf6a72143040..1e5dc0ea0e31 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1799,7 +1799,7 @@ static int liquidio_open(struct net_device *netdev)
ifstate_set(lio, LIO_IFSTATE_RUNNING);
- if (!OCTEON_CN23XX_PF(oct) || (OCTEON_CN23XX_PF(oct) && !oct->msix_on)) { + if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) { ret = setup_tx_poll_fn(netdev); if (ret) goto err_poll; @@ -1829,7 +1829,7 @@ static int liquidio_open(struct net_device *netdev) return 0;
err_rx_ctrl: - if (!OCTEON_CN23XX_PF(oct) || (OCTEON_CN23XX_PF(oct) && !oct->msix_on)) + if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) cleanup_tx_poll_fn(netdev); err_poll: if (lio->ptp_clock) { diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 2f6484dc186a..7eb2ddbe9bad 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -1436,8 +1436,10 @@ static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl, return AE_OK; }
- if (strncmp(string.pointer, bgx_sel, 4)) + if (strncmp(string.pointer, bgx_sel, 4)) { + kfree(string.pointer); return AE_OK; + }
acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, bgx_acpi_register_phy, NULL, bgx, NULL); diff --git a/drivers/net/ethernet/davicom/dm9051.c b/drivers/net/ethernet/davicom/dm9051.c index a523ddda7609..de7105a84747 100644 --- a/drivers/net/ethernet/davicom/dm9051.c +++ b/drivers/net/ethernet/davicom/dm9051.c @@ -798,8 +798,10 @@ static int dm9051_loop_rx(struct board_info *db) }
ret = dm9051_stop_mrcmd(db); - if (ret) + if (ret) { + dev_kfree_skb(skb); return ret; + }
skb->protocol = eth_type_trans(skb, db->ndev); if (db->ndev->features & NETIF_F_RXCSUM) diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c index a5f7152a1716..6a2617cc5490 100644 --- a/drivers/net/ethernet/engleder/tsnep_main.c +++ b/drivers/net/ethernet/engleder/tsnep_main.c @@ -504,6 +504,27 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) return (budget != 0); }
+static bool tsnep_tx_pending(struct tsnep_tx *tx) +{ + unsigned long flags; + struct tsnep_tx_entry *entry; + bool pending = false; + + spin_lock_irqsave(&tx->lock, flags); + + if (tx->read != tx->write) { + entry = &tx->entry[tx->read]; + if ((__le32_to_cpu(entry->desc_wb->properties) & + TSNEP_TX_DESC_OWNER_MASK) == + (entry->properties & TSNEP_TX_DESC_OWNER_MASK)) + pending = true; + } + + spin_unlock_irqrestore(&tx->lock, flags); + + return pending; +} + static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr, struct tsnep_tx *tx) { @@ -751,6 +772,19 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi, return done; }
+static bool tsnep_rx_pending(struct tsnep_rx *rx) +{ + struct tsnep_rx_entry *entry; + + entry = &rx->entry[rx->read]; + if ((__le32_to_cpu(entry->desc_wb->properties) & + TSNEP_DESC_OWNER_COUNTER_MASK) == + (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK)) + return true; + + return false; +} + static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr, struct tsnep_rx *rx) { @@ -795,6 +829,17 @@ static void tsnep_rx_close(struct tsnep_rx *rx) tsnep_rx_ring_cleanup(rx); }
+static bool tsnep_pending(struct tsnep_queue *queue) +{ + if (queue->tx && tsnep_tx_pending(queue->tx)) + return true; + + if (queue->rx && tsnep_rx_pending(queue->rx)) + return true; + + return false; +} + static int tsnep_poll(struct napi_struct *napi, int budget) { struct tsnep_queue *queue = container_of(napi, struct tsnep_queue, @@ -815,9 +860,19 @@ static int tsnep_poll(struct napi_struct *napi, int budget) if (!complete) return budget;
- if (likely(napi_complete_done(napi, done))) + if (likely(napi_complete_done(napi, done))) { tsnep_enable_irq(queue->adapter, queue->irq_mask);
+ /* reschedule if work is already pending, prevent rotten packets + * which are transmitted or received after polling but before + * interrupt enable + */ + if (tsnep_pending(queue)) { + tsnep_disable_irq(queue->adapter, queue->irq_mask); + napi_schedule(napi); + } + } + return min(done, budget - 1); }
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index d0fd3045ce11..1d8ec1b120a1 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -2058,7 +2058,7 @@ static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) /* enable Tx ints by setting pkt thr to 1 */ enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1);
- tbmr = ENETC_TBMR_EN; + tbmr = ENETC_TBMR_EN | ENETC_TBMR_SET_PRIO(tx_ring->prio); if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) tbmr |= ENETC_TBMR_VIH;
@@ -2121,13 +2121,14 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
static void enetc_setup_bdrs(struct enetc_ndev_priv *priv) { + struct enetc_hw *hw = &priv->si->hw; int i;
for (i = 0; i < priv->num_tx_rings; i++) - enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]); + enetc_setup_txbdr(hw, priv->tx_ring[i]);
for (i = 0; i < priv->num_rx_rings; i++) - enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]); + enetc_setup_rxbdr(hw, priv->rx_ring[i]); }
static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) @@ -2160,13 +2161,14 @@ static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
static void enetc_clear_bdrs(struct enetc_ndev_priv *priv) { + struct enetc_hw *hw = &priv->si->hw; int i;
for (i = 0; i < priv->num_tx_rings; i++) - enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]); + enetc_clear_txbdr(hw, priv->tx_ring[i]);
for (i = 0; i < priv->num_rx_rings; i++) - enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]); + enetc_clear_rxbdr(hw, priv->rx_ring[i]);
udelay(1); } @@ -2174,13 +2176,13 @@ static void enetc_clear_bdrs(struct enetc_ndev_priv *priv) static int enetc_setup_irqs(struct enetc_ndev_priv *priv) { struct pci_dev *pdev = priv->si->pdev; + struct enetc_hw *hw = &priv->si->hw; int i, j, err;
for (i = 0; i < priv->bdr_int_num; i++) { int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i); struct enetc_int_vector *v = priv->int_vector[i]; int entry = ENETC_BDR_INT_BASE_IDX + i; - struct enetc_hw *hw = &priv->si->hw;
snprintf(v->name, sizeof(v->name), "%s-rxtx%d", priv->ndev->name, i); @@ -2268,13 +2270,14 @@ static void enetc_setup_interrupts(struct enetc_ndev_priv *priv)
static void enetc_clear_interrupts(struct enetc_ndev_priv *priv) { + struct enetc_hw *hw = &priv->si->hw; int i;
for (i = 0; i < priv->num_tx_rings; i++) - enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0); + enetc_txbdr_wr(hw, i, ENETC_TBIER, 0);
for (i = 0; i < priv->num_rx_rings; i++) - enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0); + enetc_rxbdr_wr(hw, i, ENETC_RBIER, 0); }
static int enetc_phylink_connect(struct net_device *ndev) @@ -2441,6 +2444,7 @@ int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) { struct enetc_ndev_priv *priv = netdev_priv(ndev); struct tc_mqprio_qopt *mqprio = type_data; + struct enetc_hw *hw = &priv->si->hw; struct enetc_bdr *tx_ring; int num_stack_tx_queues; u8 num_tc; @@ -2457,7 +2461,8 @@ int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) /* Reset all ring priorities to 0 */ for (i = 0; i < priv->num_tx_rings; i++) { tx_ring = priv->tx_ring[i]; - enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, 0); + tx_ring->prio = 0; + enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio); }
return 0; @@ -2476,7 +2481,8 @@ int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) */ for (i = 0; i < num_tc; i++) { tx_ring = priv->tx_ring[i]; - enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, i); + tx_ring->prio = i; + enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio); }
/* Reset the number of netdev queues based on the TC count */ @@ -2589,19 +2595,21 @@ static int enetc_set_rss(struct net_device *ndev, int en) static void enetc_enable_rxvlan(struct net_device *ndev, bool en) { struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; int i;
for (i = 0; i < priv->num_rx_rings; i++) - enetc_bdr_enable_rxvlan(&priv->si->hw, i, en); + enetc_bdr_enable_rxvlan(hw, i, en); }
static void enetc_enable_txvlan(struct net_device *ndev, bool en) { struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; int i;
for (i = 0; i < priv->num_tx_rings; i++) - enetc_bdr_enable_txvlan(&priv->si->hw, i, en); + enetc_bdr_enable_txvlan(hw, i, en); }
void enetc_set_features(struct net_device *ndev, netdev_features_t features) diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index 2cfe6944ebd3..bb1b3b0e40e4 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -95,6 +95,7 @@ struct enetc_bdr { void __iomem *rcir; }; u16 index; + u16 prio; int bd_count; /* # of BDs */ int next_to_use; int next_to_clean; @@ -467,19 +468,20 @@ int enetc_set_psfp(struct net_device *ndev, bool en);
static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv) { + struct enetc_hw *hw = &priv->si->hw; u32 reg;
- reg = enetc_port_rd(&priv->si->hw, ENETC_PSIDCAPR); + reg = enetc_port_rd(hw, ENETC_PSIDCAPR); priv->psfp_cap.max_streamid = reg & ENETC_PSIDCAPR_MSK; /* Port stream filter capability */ - reg = enetc_port_rd(&priv->si->hw, ENETC_PSFCAPR); + reg = enetc_port_rd(hw, ENETC_PSFCAPR); priv->psfp_cap.max_psfp_filter = reg & ENETC_PSFCAPR_MSK; /* Port stream gate capability */ - reg = enetc_port_rd(&priv->si->hw, ENETC_PSGCAPR); + reg = enetc_port_rd(hw, ENETC_PSGCAPR); priv->psfp_cap.max_psfp_gate = (reg & ENETC_PSGCAPR_SGIT_MSK); priv->psfp_cap.max_psfp_gatelist = (reg & ENETC_PSGCAPR_GCL_MSK) >> 16; /* Port flow meter capability */ - reg = enetc_port_rd(&priv->si->hw, ENETC_PFMCAPR); + reg = enetc_port_rd(hw, ENETC_PFMCAPR); priv->psfp_cap.max_psfp_meter = reg & ENETC_PFMCAPR_MSK; }
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index f8a2f02ce22d..5fcb02b00699 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -17,8 +17,9 @@ static u16 enetc_get_max_gcl_len(struct enetc_hw *hw)
void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed) { + struct enetc_hw *hw = &priv->si->hw; u32 old_speed = priv->speed; - u32 pspeed; + u32 pspeed, tmp;
if (speed == old_speed) return; @@ -39,16 +40,15 @@ void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed) }
priv->speed = speed; - enetc_port_wr(&priv->si->hw, ENETC_PMR, - (enetc_port_rd(&priv->si->hw, ENETC_PMR) - & (~ENETC_PMR_PSPEED_MASK)) - | pspeed); + tmp = enetc_port_rd(hw, ENETC_PMR); + enetc_port_wr(hw, ENETC_PMR, (tmp & ~ENETC_PMR_PSPEED_MASK) | pspeed); }
static int enetc_setup_taprio(struct net_device *ndev, struct tc_taprio_qopt_offload *admin_conf) { struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; struct enetc_cbd cbd = {.cmd = 0}; struct tgs_gcl_conf *gcl_config; struct tgs_gcl_data *gcl_data; @@ -61,15 +61,13 @@ static int enetc_setup_taprio(struct net_device *ndev, int err; int i;
- if (admin_conf->num_entries > enetc_get_max_gcl_len(&priv->si->hw)) + if (admin_conf->num_entries > enetc_get_max_gcl_len(hw)) return -EINVAL; gcl_len = admin_conf->num_entries;
- tge = enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET); + tge = enetc_rd(hw, ENETC_QBV_PTGCR_OFFSET); if (!admin_conf->enable) { - enetc_wr(&priv->si->hw, - ENETC_QBV_PTGCR_OFFSET, - tge & (~ENETC_QBV_TGE)); + enetc_wr(hw, ENETC_QBV_PTGCR_OFFSET, tge & ~ENETC_QBV_TGE);
priv->active_offloads &= ~ENETC_F_QBV;
@@ -117,14 +115,11 @@ static int enetc_setup_taprio(struct net_device *ndev, cbd.cls = BDCR_CMD_PORT_GCL; cbd.status_flags = 0;
- enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET, - tge | ENETC_QBV_TGE); + enetc_wr(hw, ENETC_QBV_PTGCR_OFFSET, tge | ENETC_QBV_TGE);
err = enetc_send_cmd(priv->si, &cbd); if (err) - enetc_wr(&priv->si->hw, - ENETC_QBV_PTGCR_OFFSET, - tge & (~ENETC_QBV_TGE)); + enetc_wr(hw, ENETC_QBV_PTGCR_OFFSET, tge & ~ENETC_QBV_TGE);
enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
@@ -138,6 +133,8 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data) { struct tc_taprio_qopt_offload *taprio = type_data; struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + struct enetc_bdr *tx_ring; int err; int i;
@@ -146,18 +143,20 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data) if (priv->tx_ring[i]->tsd_enable) return -EBUSY;
- for (i = 0; i < priv->num_tx_rings; i++) - enetc_set_bdr_prio(&priv->si->hw, - priv->tx_ring[i]->index, - taprio->enable ? i : 0); + for (i = 0; i < priv->num_tx_rings; i++) { + tx_ring = priv->tx_ring[i]; + tx_ring->prio = taprio->enable ? i : 0; + enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio); + }
err = enetc_setup_taprio(ndev, taprio); - - if (err) - for (i = 0; i < priv->num_tx_rings; i++) - enetc_set_bdr_prio(&priv->si->hw, - priv->tx_ring[i]->index, - taprio->enable ? 0 : i); + if (err) { + for (i = 0; i < priv->num_tx_rings; i++) { + tx_ring = priv->tx_ring[i]; + tx_ring->prio = taprio->enable ? 0 : i; + enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio); + } + }
return err; } @@ -178,7 +177,7 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data) struct tc_cbs_qopt_offload *cbs = type_data; u32 port_transmit_rate = priv->speed; u8 tc_nums = netdev_get_num_tc(ndev); - struct enetc_si *si = priv->si; + struct enetc_hw *hw = &priv->si->hw; u32 hi_credit_bit, hi_credit_reg; u32 max_interference_size; u32 port_frame_max_size; @@ -199,15 +198,15 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data) * lower than this TC have been disabled. */ if (tc == prio_top && - enetc_get_cbs_enable(&si->hw, prio_next)) { + enetc_get_cbs_enable(hw, prio_next)) { dev_err(&ndev->dev, "Disable TC%d before disable TC%d\n", prio_next, tc); return -EINVAL; }
- enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), 0); - enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), 0); + enetc_port_wr(hw, ENETC_PTCCBSR1(tc), 0); + enetc_port_wr(hw, ENETC_PTCCBSR0(tc), 0);
return 0; } @@ -224,13 +223,13 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data) * higher than this TC have been enabled. */ if (tc == prio_next) { - if (!enetc_get_cbs_enable(&si->hw, prio_top)) { + if (!enetc_get_cbs_enable(hw, prio_top)) { dev_err(&ndev->dev, "Enable TC%d first before enable TC%d\n", prio_top, prio_next); return -EINVAL; } - bw_sum += enetc_get_cbs_bw(&si->hw, prio_top); + bw_sum += enetc_get_cbs_bw(hw, prio_top); }
if (bw_sum + bw >= 100) { @@ -239,7 +238,7 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data) return -EINVAL; }
- enetc_port_rd(&si->hw, ENETC_PTCMSDUR(tc)); + enetc_port_rd(hw, ENETC_PTCMSDUR(tc));
/* For top prio TC, the max_interfrence_size is maxSizedFrame. * @@ -259,8 +258,8 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data) u32 m0, ma, r0, ra;
m0 = port_frame_max_size * 8; - ma = enetc_port_rd(&si->hw, ENETC_PTCMSDUR(prio_top)) * 8; - ra = enetc_get_cbs_bw(&si->hw, prio_top) * + ma = enetc_port_rd(hw, ENETC_PTCMSDUR(prio_top)) * 8; + ra = enetc_get_cbs_bw(hw, prio_top) * port_transmit_rate * 10000ULL; r0 = port_transmit_rate * 1000000ULL; max_interference_size = m0 + ma + @@ -280,10 +279,10 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data) hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit, port_transmit_rate * 1000000ULL);
- enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), hi_credit_reg); + enetc_port_wr(hw, ENETC_PTCCBSR1(tc), hi_credit_reg);
/* Set bw register and enable this traffic class */ - enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE); + enetc_port_wr(hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE);
return 0; } @@ -293,6 +292,7 @@ int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data) struct enetc_ndev_priv *priv = netdev_priv(ndev); struct tc_etf_qopt_offload *qopt = type_data; u8 tc_nums = netdev_get_num_tc(ndev); + struct enetc_hw *hw = &priv->si->hw; int tc;
if (!tc_nums) @@ -304,12 +304,11 @@ int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data) return -EINVAL;
/* TSD and Qbv are mutually exclusive in hardware */ - if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE) + if (enetc_rd(hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE) return -EBUSY;
priv->tx_ring[tc]->tsd_enable = qopt->enable; - enetc_port_wr(&priv->si->hw, ENETC_PTCTSDR(tc), - qopt->enable ? ENETC_TSDE : 0); + enetc_port_wr(hw, ENETC_PTCTSDR(tc), qopt->enable ? ENETC_TSDE : 0);
return 0; } diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 3f6187c16424..0d1bab4ac1b0 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -298,7 +298,6 @@ struct iavf_adapter { #define IAVF_FLAG_QUEUES_DISABLED BIT(17) #define IAVF_FLAG_SETUP_NETDEV_FEATURES BIT(18) #define IAVF_FLAG_REINIT_MSIX_NEEDED BIT(20) -#define IAVF_FLAG_INITIAL_MAC_SET BIT(23) /* duplicates for common code */ #define IAVF_FLAG_DCB_ENABLED 0 /* flags for admin queue service task */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 79fef8c59d65..cff03723f4f9 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -1087,12 +1087,6 @@ static int iavf_set_mac(struct net_device *netdev, void *p) if (ret) return ret;
- /* If this is an initial set MAC during VF spawn do not wait */ - if (adapter->flags & IAVF_FLAG_INITIAL_MAC_SET) { - adapter->flags &= ~IAVF_FLAG_INITIAL_MAC_SET; - return 0; - } - ret = wait_event_interruptible_timeout(adapter->vc_waitqueue, iavf_is_mac_set_handled(netdev, addr->sa_data), msecs_to_jiffies(2500)); @@ -2605,8 +2599,6 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter) ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); }
- adapter->flags |= IAVF_FLAG_INITIAL_MAC_SET; - adapter->tx_desc_count = IAVF_DEFAULT_TXD; adapter->rx_desc_count = IAVF_DEFAULT_RXD; err = iavf_init_interrupt_scheme(adapter); @@ -2921,7 +2913,6 @@ static void iavf_disable_vf(struct iavf_adapter *adapter) iavf_free_queues(adapter); memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE); iavf_shutdown_adminq(&adapter->hw); - adapter->netdev->flags &= ~IFF_UP; adapter->flags &= ~IAVF_FLAG_RESET_PENDING; iavf_change_state(adapter, __IAVF_DOWN); wake_up(&adapter->down_waitqueue); @@ -3021,6 +3012,11 @@ static void iavf_reset_task(struct work_struct *work) iavf_disable_vf(adapter); mutex_unlock(&adapter->client_lock); mutex_unlock(&adapter->crit_lock); + if (netif_running(netdev)) { + rtnl_lock(); + dev_close(netdev); + rtnl_unlock(); + } return; /* Do not attempt to reinit. It's dead, Jim. */ }
@@ -3033,6 +3029,7 @@ static void iavf_reset_task(struct work_struct *work)
if (running) { netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); adapter->link_up = false; iavf_napi_disable_all(adapter); } @@ -3172,6 +3169,16 @@ static void iavf_reset_task(struct work_struct *work)
mutex_unlock(&adapter->client_lock); mutex_unlock(&adapter->crit_lock); + + if (netif_running(netdev)) { + /* Close device to ensure that Tx queues will not be started + * during netif_device_attach() at the end of the reset task. + */ + rtnl_lock(); + dev_close(netdev); + rtnl_unlock(); + } + dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); reset_finish: rtnl_lock(); @@ -5035,23 +5042,21 @@ static int __maybe_unused iavf_resume(struct device *dev_d) static void iavf_remove(struct pci_dev *pdev) { struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev); - struct net_device *netdev = adapter->netdev; struct iavf_fdir_fltr *fdir, *fdirtmp; struct iavf_vlan_filter *vlf, *vlftmp; + struct iavf_cloud_filter *cf, *cftmp; struct iavf_adv_rss *rss, *rsstmp; struct iavf_mac_filter *f, *ftmp; - struct iavf_cloud_filter *cf, *cftmp; - struct iavf_hw *hw = &adapter->hw; + struct net_device *netdev; + struct iavf_hw *hw; int err;
- /* When reboot/shutdown is in progress no need to do anything - * as the adapter is already REMOVE state that was set during - * iavf_shutdown() callback. - */ - if (adapter->state == __IAVF_REMOVE) + netdev = adapter->netdev; + hw = &adapter->hw; + + if (test_and_set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) return;
- set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section); /* Wait until port initialization is complete. * There are flows where register/unregister netdev may race. */ diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index eaa51cd7456b..8f86be995092 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -7352,6 +7352,7 @@ static int mvpp2_get_sram(struct platform_device *pdev, struct mvpp2 *priv) { struct resource *res; + void __iomem *base;
res = platform_get_resource(pdev, IORESOURCE_MEM, 2); if (!res) { @@ -7362,9 +7363,12 @@ static int mvpp2_get_sram(struct platform_device *pdev, return 0; }
- priv->cm3_base = devm_ioremap_resource(&pdev->dev, res); + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base);
- return PTR_ERR_OR_ZERO(priv->cm3_base); + priv->cm3_base = base; + return 0; }
static int mvpp2_probe(struct platform_device *pdev) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index f42a09f04b25..70cda1571324 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -535,6 +535,8 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused) sprintf(lmac, "LMAC%d", lmac_id); seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n", dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac); + + pci_dev_put(pdev); } return 0; } @@ -2221,6 +2223,7 @@ static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id) } }
+ pci_dev_put(pdev); return 0; }
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 0879a48411f3..3dc90060d70d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -4979,6 +4979,8 @@ static int nix_setup_ipolicers(struct rvu *rvu, ipolicer->ref_count = devm_kcalloc(rvu->dev, ipolicer->band_prof.max, sizeof(u16), GFP_KERNEL); + if (!ipolicer->ref_count) + return -ENOMEM; }
/* Set policer timeunit to 2us ie (19 + 1) * 100 nsec = 2us */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c index b04fb226f708..ae50d56258ec 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c @@ -62,15 +62,18 @@ int rvu_sdp_init(struct rvu *rvu) pfvf->sdp_info = devm_kzalloc(rvu->dev, sizeof(struct sdp_node_info), GFP_KERNEL); - if (!pfvf->sdp_info) + if (!pfvf->sdp_info) { + pci_dev_put(pdev); return -ENOMEM; + }
dev_info(rvu->dev, "SDP PF number:%d\n", sdp_pf_num[i]);
- put_device(&pdev->dev); i++; }
+ pci_dev_put(pdev); + return 0; }
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c index a0ad0bcbf89f..9f588ecba93e 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_main.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c @@ -730,6 +730,7 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id) return 0;
err_sfp_bind: + unregister_netdev(dev); err_register_netdev: prestera_port_list_del(port); err_port_init: diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 84433f3a3e22..5380caf0acc2 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -2363,8 +2363,10 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) data + NET_SKB_PAD + eth->ip_align, ring->buf_size, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(eth->dma_dev, - dma_addr))) + dma_addr))) { + skb_free_frag(data); return -ENOMEM; + } } rxd->rxd1 = (unsigned int)dma_addr; ring->data[i] = data; @@ -2979,8 +2981,10 @@ static int mtk_open(struct net_device *dev) u32 gdm_config = MTK_GDMA_TO_PDMA;
err = mtk_start_dma(eth); - if (err) + if (err) { + phylink_disconnect_phy(mac->phylink); return err; + }
if (eth->soc->offload_version && mtk_ppe_start(eth->ppe) == 0) gdm_config = MTK_GDMA_TO_PPE; @@ -4103,12 +4107,12 @@ static int mtk_probe(struct platform_device *pdev) eth->ppe = mtk_ppe_init(eth, eth->base + MTK_ETH_PPE_BASE, 2); if (!eth->ppe) { err = -ENOMEM; - goto err_free_dev; + goto err_deinit_mdio; }
err = mtk_eth_offload_init(eth); if (err) - goto err_free_dev; + goto err_deinit_mdio; }
for (i = 0; i < MTK_MAX_DEVS; i++) { diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index b149e601f673..48cfaa7eaf50 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -697,7 +697,8 @@ static int mlx4_create_zones(struct mlx4_dev *dev, err = mlx4_bitmap_init(*bitmap + k, 1, MLX4_QP_TABLE_RAW_ETH_SIZE - 1, 0, 0); - mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0); + if (!err) + mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0); }
if (err) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 2e0d59ca62b5..74bd05e5dda2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -45,6 +45,8 @@ #include "mlx5_core.h" #include "lib/eq.h" #include "lib/tout.h" +#define CREATE_TRACE_POINTS +#include "diag/cmd_tracepoint.h"
enum { CMD_IF_REV = 5, @@ -785,27 +787,14 @@ EXPORT_SYMBOL(mlx5_cmd_out_err); static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out) { u16 opcode, op_mod; - u32 syndrome; - u8 status; u16 uid; - int err; - - syndrome = MLX5_GET(mbox_out, out, syndrome); - status = MLX5_GET(mbox_out, out, status);
opcode = MLX5_GET(mbox_in, in, opcode); op_mod = MLX5_GET(mbox_in, in, op_mod); uid = MLX5_GET(mbox_in, in, uid);
- err = cmd_status_to_err(status); - if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY) mlx5_cmd_out_err(dev, opcode, op_mod, out); - else - mlx5_core_dbg(dev, - "%s(0x%x) op_mod(0x%x) uid(%d) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n", - mlx5_command_str(opcode), opcode, op_mod, uid, - cmd_status_str(status), status, syndrome, err); }
int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out) @@ -1016,6 +1005,7 @@ static void cmd_work_handler(struct work_struct *work) cmd_ent_get(ent); set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
+ cmd_ent_get(ent); /* for the _real_ FW event on completion */ /* Skip sending command to fw if internal error */ if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) { ent->ret = -ENXIO; @@ -1023,7 +1013,6 @@ static void cmd_work_handler(struct work_struct *work) return; }
- cmd_ent_get(ent); /* for the _real_ FW event on completion */ /* ring doorbell after the descriptor is valid */ mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); wmb(); @@ -1672,8 +1661,8 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force cmd_ent_put(ent); /* timeout work was canceled */
if (!forced || /* Real FW completion */ - pci_channel_offline(dev->pdev) || /* FW is inaccessible */ - dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) + mlx5_cmd_is_down(dev) || /* No real FW completion is expected */ + !opcode_allowed(cmd, ent->op)) cmd_ent_put(ent);
ent->ts2 = ktime_get_ns(); @@ -1892,6 +1881,16 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, return err; }
+static void mlx5_cmd_err_trace(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out) +{ + u32 syndrome = MLX5_GET(mbox_out, out, syndrome); + u8 status = MLX5_GET(mbox_out, out, status); + + trace_mlx5_cmd(mlx5_command_str(opcode), opcode, op_mod, + cmd_status_str(status), status, syndrome, + cmd_status_to_err(status)); +} + static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status, u32 syndrome, int err) { @@ -1914,7 +1913,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status, }
/* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */ -static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, void *out) +static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, u16 op_mod, void *out) { u32 syndrome = MLX5_GET(mbox_out, out, syndrome); u8 status = MLX5_GET(mbox_out, out, status); @@ -1922,8 +1921,10 @@ static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, void * if (err == -EREMOTEIO) /* -EREMOTEIO is preserved */ err = -EIO;
- if (!err && status != MLX5_CMD_STAT_OK) + if (!err && status != MLX5_CMD_STAT_OK) { err = -EREMOTEIO; + mlx5_cmd_err_trace(dev, opcode, op_mod, out); + }
cmd_status_log(dev, opcode, status, syndrome, err); return err; @@ -1951,9 +1952,9 @@ int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int { int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false); u16 opcode = MLX5_GET(mbox_in, in, opcode); + u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
- err = cmd_status_err(dev, err, opcode, out); - return err; + return cmd_status_err(dev, err, opcode, op_mod, out); } EXPORT_SYMBOL(mlx5_cmd_do);
@@ -1997,8 +1998,9 @@ int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, { int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true); u16 opcode = MLX5_GET(mbox_in, in, opcode); + u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
- err = cmd_status_err(dev, err, opcode, out); + err = cmd_status_err(dev, err, opcode, op_mod, out); return mlx5_cmd_check(dev, err, in, out); } EXPORT_SYMBOL(mlx5_cmd_exec_polling); @@ -2034,7 +2036,7 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work) struct mlx5_async_ctx *ctx;
ctx = work->ctx; - status = cmd_status_err(ctx->dev, status, work->opcode, work->out); + status = cmd_status_err(ctx->dev, status, work->opcode, work->op_mod, work->out); work->user_callback(status, work); if (atomic_dec_and_test(&ctx->num_inflight)) complete(&ctx->inflight_done); @@ -2049,6 +2051,7 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, work->ctx = ctx; work->user_callback = callback; work->opcode = MLX5_GET(mbox_in, in, opcode); + work->op_mod = MLX5_GET(mbox_in, in, op_mod); work->out = out; if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight))) return -EIO; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h new file mode 100644 index 000000000000..406ebe17405f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mlx5 + +#if !defined(_MLX5_CMD_TP_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _MLX5_CMD_TP_H_ + +#include <linux/tracepoint.h> +#include <linux/trace_seq.h> + +TRACE_EVENT(mlx5_cmd, + TP_PROTO(const char *command_str, u16 opcode, u16 op_mod, + const char *status_str, u8 status, u32 syndrome, int err), + TP_ARGS(command_str, opcode, op_mod, status_str, status, syndrome, err), + TP_STRUCT__entry(__string(command_str, command_str) + __field(u16, opcode) + __field(u16, op_mod) + __string(status_str, status_str) + __field(u8, status) + __field(u32, syndrome) + __field(int, err) + ), + TP_fast_assign(__assign_str(command_str, command_str); + __entry->opcode = opcode; + __entry->op_mod = op_mod; + __assign_str(status_str, status_str); + __entry->status = status; + __entry->syndrome = syndrome; + __entry->err = err; + ), + TP_printk("%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x), err(%d)", + __get_str(command_str), __entry->opcode, __entry->op_mod, + __get_str(status_str), __entry->status, __entry->syndrome, + __entry->err) +); + +#endif /* _MLX5_CMD_TP_H_ */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ./diag +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE cmd_tracepoint +#include <trace/define_trace.h> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index 978a2bb8e122..21831386b26e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -638,7 +638,7 @@ static void mlx5_tracer_handle_timestamp_trace(struct mlx5_fw_tracer *tracer, trace_timestamp = (timestamp_event.timestamp & MASK_52_7) | (str_frmt->timestamp & MASK_6_0); else - trace_timestamp = ((timestamp_event.timestamp & MASK_52_7) - 1) | + trace_timestamp = ((timestamp_event.timestamp - 1) & MASK_52_7) | (str_frmt->timestamp & MASK_6_0);
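A note on the new diag/cmd_tracepoint.h above: a TRACE_EVENT() header only emits a usable call site once some .c file in the driver instantiates it. The sketch below shows the conventional pattern; the exact hunk that does this for mlx5 is not reproduced in this excerpt, so treat the placement as an assumption.

/* Conventional instantiation of a TRACE_EVENT() header (sketch): exactly
 * one translation unit defines CREATE_TRACE_POINTS before the include,
 * which generates the trace_mlx5_cmd() symbol used by mlx5_cmd_err_trace().
 */
#define CREATE_TRACE_POINTS
#include "diag/cmd_tracepoint.h"

Because TRACE_INCLUDE_PATH is the relative ./diag, the driver Makefile normally also needs an include-path flag such as subdir-ccflags-y += -I$(src); again, this is the usual convention rather than something quoted from the patch.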
mlx5_tracer_print_trace(str_frmt, dev, trace_timestamp); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c index 5aff97914367..ff73d25bc6eb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c @@ -224,15 +224,16 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, list_for_each_entry(flow, flow_list, tmp_list) { if (!mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, SLOW)) continue; - spec = &flow->attr->parse_attr->spec; - - /* update from encap rule to slow path rule */ - rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
attr = mlx5e_tc_get_encap_attr(flow); esw_attr = attr->esw_attr; /* mark the flow's encap dest as non-valid */ esw_attr->dests[flow->tmp_entry_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID; + esw_attr->dests[flow->tmp_entry_index].pkt_reformat = NULL; + + /* update from encap rule to slow path rule */ + spec = &flow->attr->parse_attr->spec; + rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
if (IS_ERR(rule)) { err = PTR_ERR(rule); @@ -251,6 +252,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, /* we know that the encap is valid */ e->flags &= ~MLX5_ENCAP_ENTRY_VALID; mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat); + e->pkt_reformat = NULL; }
static void mlx5e_take_tmp_flow(struct mlx5e_tc_flow *flow, @@ -762,8 +764,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv, struct net_device *mirred_dev, int out_index, struct netlink_ext_ack *extack, - struct net_device **encap_dev, - bool *encap_valid) + struct net_device **encap_dev) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5e_tc_flow_parse_attr *parse_attr; @@ -878,9 +879,8 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv, if (e->flags & MLX5_ENCAP_ENTRY_VALID) { attr->esw_attr->dests[out_index].pkt_reformat = e->pkt_reformat; attr->esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID; - *encap_valid = true; } else { - *encap_valid = false; + flow_flag_set(flow, SLOW); } mutex_unlock(&esw->offloads.encap_tbl_lock);
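The two hunks above that clear pkt_reformat (esw_attr->dests[...].pkt_reformat = NULL and e->pkt_reformat = NULL) follow a general defensive pattern; a minimal generic sketch, with hypothetical names, is:

/* Clearing a pointer right after (or instead of) releasing the object it
 * refers to turns later teardown paths into harmless no-ops rather than
 * double frees or stale dereferences.  'obj', 'res' and release_res()
 * are hypothetical.
 */
if (obj->res) {
	release_res(obj->res);
	obj->res = NULL;
}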
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h index d542b8476491..8ad273dde40e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h @@ -17,8 +17,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv, struct net_device *mirred_dev, int out_index, struct netlink_ext_ack *extack, - struct net_device **encap_dev, - bool *encap_valid); + struct net_device **encap_dev);
int mlx5e_attach_decap(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 229c14b1af00..949ef560df78 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1620,7 +1620,6 @@ set_encap_dests(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr, struct netlink_ext_ack *extack, - bool *encap_valid, bool *vf_tun) { struct mlx5e_tc_flow_parse_attr *parse_attr; @@ -1637,7 +1636,6 @@ set_encap_dests(struct mlx5e_priv *priv, parse_attr = attr->parse_attr; esw_attr = attr->esw_attr; *vf_tun = false; - *encap_valid = true;
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) { struct net_device *out_dev; @@ -1654,7 +1652,7 @@ set_encap_dests(struct mlx5e_priv *priv, goto out; } err = mlx5e_attach_encap(priv, flow, attr, out_dev, out_index, - extack, &encap_dev, encap_valid); + extack, &encap_dev); dev_put(out_dev); if (err) goto out; @@ -1718,8 +1716,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow_parse_attr *parse_attr; struct mlx5_flow_attr *attr = flow->attr; struct mlx5_esw_flow_attr *esw_attr; - bool vf_tun, encap_valid; u32 max_prio, max_chain; + bool vf_tun; int err = 0;
parse_attr = attr->parse_attr; @@ -1809,7 +1807,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, esw_attr->int_port = int_port; }
-	err = set_encap_dests(priv, flow, attr, extack, &encap_valid, &vf_tun);
+	err = set_encap_dests(priv, flow, attr, extack, &vf_tun);
 	if (err)
 		goto err_out;
@@ -1839,7 +1837,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, * (1) there's no error * (2) there's an encap action and we don't have valid neigh */ - if (!encap_valid || flow_flag_test(flow, SLOW)) + if (flow_flag_test(flow, SLOW)) flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec); else flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr); @@ -3737,7 +3735,7 @@ alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack) struct mlx5e_post_act *post_act = get_post_action(flow->priv); struct mlx5_flow_attr *attr, *next_attr = NULL; struct mlx5e_post_act_handle *handle; - bool vf_tun, encap_valid = true; + bool vf_tun; int err;
/* This is going in reverse order as needed. @@ -3759,13 +3757,10 @@ alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack) if (list_is_last(&attr->list, &flow->attrs)) break;
-	err = set_encap_dests(flow->priv, flow, attr, extack, &encap_valid, &vf_tun);
+	err = set_encap_dests(flow->priv, flow, attr, extack, &vf_tun);
 	if (err)
 		goto out_free;
- if (!encap_valid) - flow_flag_set(flow, SLOW); - err = actions_prepare_mod_hdr_actions(flow->priv, flow, attr, extack); if (err) goto out_free; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 3c68cac4a9c2..061ac8799354 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -431,7 +431,7 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f mlx5_lag_mpesw_is_activated(esw->dev)) dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK; } - if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) { + if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) { if (pkt_reformat) { flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index 9d908a0ccfef..1e46f9afa40e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@ -9,7 +9,8 @@ enum { MLX5_FW_RESET_FLAGS_RESET_REQUESTED, MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, MLX5_FW_RESET_FLAGS_PENDING_COMP, - MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS + MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, + MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED };
struct mlx5_fw_reset { @@ -406,7 +407,7 @@ static void mlx5_sync_reset_now_event(struct work_struct *work) err = mlx5_pci_link_toggle(dev); if (err) { mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, no reset done, err %d\n", err); - goto done; + set_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags); }
mlx5_enter_error_state(dev, true); @@ -482,6 +483,10 @@ int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev) goto out; } err = fw_reset->ret; + if (test_and_clear_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags)) { + mlx5_unload_one_devl_locked(dev); + mlx5_load_one_devl_locked(dev, false); + } out: clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags); return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index e5e32430b6af..ac178796e484 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1759,7 +1759,8 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev, res = state == pci_channel_io_perm_failure ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
- mlx5_pci_trace(dev, "Exit, result = %d, %s\n", res, result2str(res)); + mlx5_core_info(dev, "%s Device state = %d pci_status: %d. Exit, result = %d, %s\n", + __func__, dev->state, dev->pci_status, res, result2str(res)); return res; }
@@ -1798,7 +1799,8 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev) struct mlx5_core_dev *dev = pci_get_drvdata(pdev); int err;
-	mlx5_pci_trace(dev, "Enter\n");
+	mlx5_core_info(dev, "%s Device state = %d pci_status: %d. Enter\n",
+		       __func__, dev->state, dev->pci_status);
err = mlx5_pci_enable_device(dev); if (err) { @@ -1820,7 +1822,8 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
res = PCI_ERS_RESULT_RECOVERED; out: - mlx5_pci_trace(dev, "Exit, err = %d, result = %d, %s\n", err, res, result2str(res)); + mlx5_core_info(dev, "%s Device state = %d pci_status: %d. Exit, err = %d, result = %d, %s\n", + __func__, dev->state, dev->pci_status, err, res, result2str(res)); return res; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c index 7da012ff0d41..8e2abbab05f0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c @@ -18,6 +18,10 @@ struct mlx5_sf_dev_table { phys_addr_t base_address; u64 sf_bar_length; struct notifier_block nb; + struct mutex table_lock; /* Serializes sf life cycle and vhca state change handler */ + struct workqueue_struct *active_wq; + struct work_struct work; + u8 stop_active_wq:1; struct mlx5_core_dev *dev; };
@@ -168,6 +172,7 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_ return 0;
sf_index = event->function_id - base_id; + mutex_lock(&table->table_lock); sf_dev = xa_load(&table->devices, sf_index); switch (event->new_vhca_state) { case MLX5_VHCA_STATE_INVALID: @@ -191,6 +196,7 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_ default: break; } + mutex_unlock(&table->table_lock); return 0; }
@@ -215,6 +221,78 @@ static int mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table) return 0; }
+static void mlx5_sf_dev_add_active_work(struct work_struct *work) +{ + struct mlx5_sf_dev_table *table = container_of(work, struct mlx5_sf_dev_table, work); + u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {}; + struct mlx5_core_dev *dev = table->dev; + u16 max_functions; + u16 function_id; + u16 sw_func_id; + int err = 0; + u8 state; + int i; + + max_functions = mlx5_sf_max_functions(dev); + function_id = MLX5_CAP_GEN(dev, sf_base_id); + for (i = 0; i < max_functions; i++, function_id++) { + if (table->stop_active_wq) + return; + err = mlx5_cmd_query_vhca_state(dev, function_id, out, sizeof(out)); + if (err) + /* A failure of specific vhca doesn't mean others will + * fail as well. + */ + continue; + state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state); + if (state != MLX5_VHCA_STATE_ACTIVE) + continue; + + sw_func_id = MLX5_GET(query_vhca_state_out, out, vhca_state_context.sw_function_id); + mutex_lock(&table->table_lock); + /* Don't probe device which is already probe */ + if (!xa_load(&table->devices, i)) + mlx5_sf_dev_add(dev, i, function_id, sw_func_id); + /* There is a race where SF got inactive after the query + * above. e.g.: the query returns that the state of the + * SF is active, and after that the eswitch manager set it to + * inactive. + * This case cannot be managed in SW, since the probing of the + * SF is on one system, and the inactivation is on a different + * system. + * If the inactive is done after the SF perform init_hca(), + * the SF will fully probe and then removed. If it was + * done before init_hca(), the SF probe will fail. + */ + mutex_unlock(&table->table_lock); + } +} + +/* In case SFs are generated externally, probe active SFs */ +static int mlx5_sf_dev_queue_active_work(struct mlx5_sf_dev_table *table) +{ + if (MLX5_CAP_GEN(table->dev, eswitch_manager)) + return 0; /* the table is local */ + + /* Use a workqueue to probe active SFs, which are in large + * quantity and may take up to minutes to probe. + */ + table->active_wq = create_singlethread_workqueue("mlx5_active_sf"); + if (!table->active_wq) + return -ENOMEM; + INIT_WORK(&table->work, &mlx5_sf_dev_add_active_work); + queue_work(table->active_wq, &table->work); + return 0; +} + +static void mlx5_sf_dev_destroy_active_work(struct mlx5_sf_dev_table *table) +{ + if (table->active_wq) { + table->stop_active_wq = true; + destroy_workqueue(table->active_wq); + } +} + void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev) { struct mlx5_sf_dev_table *table; @@ -240,11 +318,17 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev) table->base_address = pci_resource_start(dev->pdev, 2); table->max_sfs = max_sfs; xa_init(&table->devices); + mutex_init(&table->table_lock); dev->priv.sf_dev_table = table;
err = mlx5_vhca_event_notifier_register(dev, &table->nb); if (err) goto vhca_err; + + err = mlx5_sf_dev_queue_active_work(table); + if (err) + goto add_active_err; + err = mlx5_sf_dev_vhca_arm_all(table); if (err) goto arm_err; @@ -252,6 +336,8 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev) return;
arm_err: + mlx5_sf_dev_destroy_active_work(table); +add_active_err: mlx5_vhca_event_notifier_unregister(dev, &table->nb); vhca_err: table->max_sfs = 0; @@ -279,7 +365,9 @@ void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev) if (!table) return;
+ mlx5_sf_dev_destroy_active_work(table); mlx5_vhca_event_notifier_unregister(dev, &table->nb); + mutex_destroy(&table->table_lock);
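The active-SF probing added to sf/dev/dev.c above hangs off a dedicated single-threaded workqueue with a stop flag that the work function polls. Stripped of the mlx5 specifics, the lifecycle looks roughly like the sketch below (all demo_* names are hypothetical):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_table {				/* hypothetical container */
	struct workqueue_struct *wq;
	struct work_struct work;
	bool stop;
};

static void demo_scan_work(struct work_struct *work)
{
	struct demo_table *t = container_of(work, struct demo_table, work);
	int i;

	for (i = 0; i < 100; i++) {
		if (t->stop)			/* teardown can cut the scan short */
			return;
		/* ... probe one item here; per-item failures are tolerated ... */
	}
}

static int demo_start_scan(struct demo_table *t)
{
	t->wq = create_singlethread_workqueue("demo_scan");
	if (!t->wq)
		return -ENOMEM;
	INIT_WORK(&t->work, demo_scan_work);
	queue_work(t->wq, &t->work);
	return 0;
}

static void demo_stop_scan(struct demo_table *t)
{
	if (t->wq) {
		t->stop = true;			/* observed by the running work */
		destroy_workqueue(t->wq);	/* drains queued work before freeing */
	}
}

destroy_workqueue() flushes any pending work, so setting the stop flag first bounds how long that flush can take, which is the same ordering mlx5_sf_dev_destroy_active_work() uses.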
/* Now that event handler is not running, it is safe to destroy * the sf device without race. diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c index af4d3e1f1a6d..3f112a897a60 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c @@ -103,7 +103,7 @@ static int sparx5_port_open(struct net_device *ndev) err = phylink_of_phy_connect(port->phylink, port->of_node, 0); if (err) { netdev_err(ndev, "Could not attach to PHY\n"); - return err; + goto err_connect; }
phylink_start(port->phylink); @@ -115,10 +115,20 @@ static int sparx5_port_open(struct net_device *ndev) err = sparx5_serdes_set(port->sparx5, port, &port->conf); else err = phy_power_on(port->serdes); - if (err) + if (err) { netdev_err(ndev, "%s failed\n", __func__); + goto out_power; + } }
+ return 0; + +out_power: + phylink_stop(port->phylink); + phylink_disconnect_phy(port->phylink); +err_connect: + sparx5_port_enable(port, false); + return err; }
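The sparx5_port_open() change above is a standard conversion from early returns to a goto-based unwind, so each failure point rolls back exactly what was set up before it. For reference, the idiom in isolation (demo_* type and helpers are hypothetical):

struct demo_port;				/* hypothetical type and helpers */
int demo_enable(struct demo_port *p);
void demo_disable(struct demo_port *p);
int demo_connect_phy(struct demo_port *p);
void demo_disconnect_phy(struct demo_port *p);
int demo_power_on(struct demo_port *p);

static int demo_open(struct demo_port *p)
{
	int err;

	err = demo_enable(p);
	if (err)
		return err;			/* nothing to undo yet */

	err = demo_connect_phy(p);
	if (err)
		goto err_disable;

	err = demo_power_on(p);
	if (err)
		goto err_disconnect;

	return 0;

err_disconnect:					/* labels undo in reverse order */
	demo_disconnect_phy(p);
err_disable:
	demo_disable(p);
	return err;
}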
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c index 405786c00334..cb08d7bf9524 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c @@ -341,7 +341,7 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port) return ret;
attrs.split = eth_port.is_split; - attrs.splittable = !attrs.split; + attrs.splittable = eth_port.port_lanes > 1 && !attrs.split; attrs.lanes = eth_port.port_lanes; attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; attrs.phys.port_number = eth_port.label_port; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index b19bff0db1fd..400b22ad6a34 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -1395,6 +1395,9 @@ nfp_port_get_module_info(struct net_device *netdev, u8 data;
port = nfp_port_from_netdev(netdev); + if (!port) + return -EOPNOTSUPP; + /* update port state to get latest interface */ set_bit(NFP_PORT_CHANGED, &port->flags); eth_port = nfp_port_get_eth_port(port); diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 46da937ad27f..63b6b7d86ccb 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -1143,6 +1143,7 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter, buffer_info->dma = 0; buffer_info->time_stamp = 0; tx_ring->next_to_use = ring_num; + dev_kfree_skb_any(skb); return; } buffer_info->mapped = true; @@ -2459,6 +2460,7 @@ static void pch_gbe_remove(struct pci_dev *pdev) unregister_netdev(netdev);
 	pch_gbe_phy_hw_reset(&adapter->hw);
+	pci_dev_put(adapter->ptp_pdev);
free_netdev(netdev); } @@ -2534,7 +2536,7 @@ static int pch_gbe_probe(struct pci_dev *pdev, /* setup the private structure */ ret = pch_gbe_sw_init(adapter); if (ret) - goto err_free_netdev; + goto err_put_dev;
/* Initialize PHY */ ret = pch_gbe_init_phy(adapter); @@ -2592,6 +2594,8 @@ static int pch_gbe_probe(struct pci_dev *pdev,
err_free_adapter: pch_gbe_phy_hw_reset(&adapter->hw); +err_put_dev: + pci_dev_put(adapter->ptp_pdev); err_free_netdev: free_netdev(netdev); return ret; diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index 06f4d9a9e938..5a2d70a91868 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -2471,6 +2471,7 @@ static netdev_tx_t ql3xxx_send(struct sk_buff *skb, skb_shinfo(skb)->nr_frags); if (tx_cb->seg_count == -1) { netdev_err(ndev, "%s: invalid segment count!\n", __func__); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; }
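The pch_gbe hunks above add the pci_dev_put() that must balance the reference taken when the driver looked up its companion ptp_pdev, on both the remove path and the probe error path. The rule in miniature (the device IDs below are made up):

#include <linux/pci.h>

/* pci_get_device() and friends return a *referenced* struct pci_dev (or
 * NULL); every exit path that stops using it must call pci_dev_put(),
 * which is safe to call with NULL.
 */
static struct pci_dev *demo_find_companion(void)
{
	return pci_get_device(0x8086, 0x1234, NULL);	/* hypothetical IDs */
}

static void demo_release_companion(struct pci_dev *pdev)
{
	pci_dev_put(pdev);
}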
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c index 17b9d37218cb..4c33c3b5f32b 100644 --- a/drivers/net/ethernet/sfc/ef100_netdev.c +++ b/drivers/net/ethernet/sfc/ef100_netdev.c @@ -217,6 +217,7 @@ netdev_tx_t __ef100_hard_start_xmit(struct sk_buff *skb, skb->len, skb->data_len, channel->channel); if (!efx->n_channels || !efx->n_tx_channels || !channel) { netif_stop_queue(net_dev); + dev_kfree_skb_any(skb); goto err; }
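The pch_gbe, qla3xxx and ef100 changes in this stretch fix the same class of leak: when an ndo_start_xmit handler drops a frame but still returns NETDEV_TX_OK, it has consumed the skb and must free it. A minimal sketch of the rule (demo_* helpers are hypothetical):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

bool demo_can_map(struct sk_buff *skb, struct net_device *dev);			/* hypothetical */
netdev_tx_t demo_queue_frame(struct sk_buff *skb, struct net_device *dev);	/* hypothetical */

static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (!demo_can_map(skb, dev)) {
		/* NETDEV_TX_OK tells the stack the skb was consumed, so a
		 * drop path must free it itself (the line these fixes add)
		 * or the buffer leaks.
		 */
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	return demo_queue_frame(skb, dev);
}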
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h index de94921cbef9..025e0c19ec25 100644 --- a/drivers/net/ipvlan/ipvlan.h +++ b/drivers/net/ipvlan/ipvlan.h @@ -98,6 +98,7 @@ struct ipvl_port { struct sk_buff_head backlog; int count; struct ida ida; + netdevice_tracker dev_tracker; };
struct ipvl_skb_cb { diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 49ba8a50dfb1..9043bcd1b41d 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -83,6 +83,7 @@ static int ipvlan_port_create(struct net_device *dev) if (err) goto err;
+	netdev_hold(dev, &port->dev_tracker, GFP_KERNEL);
 	return 0;
err: @@ -95,6 +96,7 @@ static void ipvlan_port_destroy(struct net_device *dev) struct ipvl_port *port = ipvlan_port_get_rtnl(dev); struct sk_buff *skb;
+ netdev_put(dev, &port->dev_tracker); if (port->mode == IPVLAN_MODE_L3S) ipvlan_l3s_unregister(port); netdev_rx_handler_unregister(dev); diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index ddfa853ec9b5..104fc564a766 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -2685,11 +2685,6 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) if (ret) goto rollback;
- /* Force features update, since they are different for SW MACSec and - * HW offloading cases. - */ - netdev_update_features(dev); - rtnl_unlock(); return 0;
@@ -3457,16 +3452,9 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, return ret; }
-#define SW_MACSEC_FEATURES \ +#define MACSEC_FEATURES \ (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
-/* If h/w offloading is enabled, use real device features save for - * VLAN_FEATURES - they require additional ops - * HW_MACSEC - no reason to report it - */ -#define REAL_DEV_FEATURES(dev) \ - ((dev)->features & ~(NETIF_F_VLAN_FEATURES | NETIF_F_HW_MACSEC)) - static int macsec_dev_init(struct net_device *dev) { struct macsec_dev *macsec = macsec_priv(dev); @@ -3483,12 +3471,8 @@ static int macsec_dev_init(struct net_device *dev) return err; }
- if (macsec_is_offloaded(macsec)) { - dev->features = REAL_DEV_FEATURES(real_dev); - } else { - dev->features = real_dev->features & SW_MACSEC_FEATURES; - dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE; - } + dev->features = real_dev->features & MACSEC_FEATURES; + dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
dev->needed_headroom = real_dev->needed_headroom + MACSEC_NEEDED_HEADROOM; @@ -3520,10 +3504,7 @@ static netdev_features_t macsec_fix_features(struct net_device *dev, struct macsec_dev *macsec = macsec_priv(dev); struct net_device *real_dev = macsec->real_dev;
- if (macsec_is_offloaded(macsec)) - return REAL_DEV_FEATURES(real_dev); - - features &= (real_dev->features & SW_MACSEC_FEATURES) | + features &= (real_dev->features & MACSEC_FEATURES) | NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES; features |= NETIF_F_LLTX;
@@ -3874,7 +3855,6 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[], if (macsec_is_offloaded(macsec)) { const struct macsec_ops *ops; struct macsec_context ctx; - int ret;
ops = macsec_get_ops(netdev_priv(dev), &ctx); if (!ops) { diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index 59fe356942b5..249e7ee4a2bb 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -862,8 +862,10 @@ static int at803x_probe(struct phy_device *phydev) .wolopts = 0, };
- if (ccr < 0) + if (ccr < 0) { + ret = ccr; goto err; + } mode_cfg = ccr & AT803X_MODE_CFG_MASK;
switch (mode_cfg) { diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 8d5cbda33f66..0897fdb6254b 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -1915,6 +1915,7 @@ static const struct driver_info cdc_ncm_zlp_info = { .status = cdc_ncm_status, .rx_fixup = cdc_ncm_rx_fixup, .tx_fixup = cdc_ncm_tx_fixup, + .set_rx_mode = usbnet_cdc_update_filter, };
/* Same as cdc_ncm_info, but with FLAG_WWAN */ diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 26c34a7c21bd..afd6faa4c2ec 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1357,6 +1357,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x103a, 0)}, /* Telit LE910C4-WWX */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1057, 2)}, /* Telit FN980 */ diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 9cce7dec7366..f5c88d232b11 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -3933,12 +3933,11 @@ static int virtnet_probe(struct virtio_device *vdev) return 0;
free_unregister_netdev: - virtio_reset_device(vdev); - unregister_netdev(dev); free_failover: net_failover_destroy(vi->failover); free_vqs: + virtio_reset_device(vdev); cancel_delayed_work_sync(&vi->refill); free_receive_page_frags(vi); virtnet_del_vqs(vi); diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h index 2ec56a34fa81..0909d53cefeb 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.h +++ b/drivers/net/wireless/ath/ath11k/qmi.h @@ -27,7 +27,7 @@ #define ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01 52 #define ATH11K_QMI_CALDB_SIZE 0x480000 #define ATH11K_QMI_BDF_EXT_STR_LENGTH 0x20 -#define ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT 3 +#define ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT 5
#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035 #define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037 diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index 10daef81c355..fb2c35bd73bb 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -5232,7 +5232,7 @@ static int get_wep_tx_idx(struct airo_info *ai) return -1; }
-static int set_wep_key(struct airo_info *ai, u16 index, const char *key, +static int set_wep_key(struct airo_info *ai, u16 index, const u8 *key, u16 keylen, int perm, int lock) { static const unsigned char macaddr[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 }; @@ -5283,7 +5283,7 @@ static void proc_wepkey_on_close(struct inode *inode, struct file *file) struct net_device *dev = pde_data(inode); struct airo_info *ai = dev->ml_priv; int i, rc; - char key[16]; + u8 key[16]; u16 index = 0; int j = 0;
@@ -5311,12 +5311,22 @@ static void proc_wepkey_on_close(struct inode *inode, struct file *file) }
for (i = 0; i < 16*3 && data->wbuffer[i+j]; i++) { + int val; + + if (i % 3 == 2) + continue; + + val = hex_to_bin(data->wbuffer[i+j]); + if (val < 0) { + airo_print_err(ai->dev->name, "WebKey passed invalid key hex"); + return; + } switch(i%3) { case 0: - key[i/3] = hex_to_bin(data->wbuffer[i+j])<<4; + key[i/3] = (u8)val << 4; break; case 1: - key[i/3] |= hex_to_bin(data->wbuffer[i+j]); + key[i/3] |= (u8)val; break; } } diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index a074552bcec3..3179682daca7 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -910,6 +910,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac, struct hwsim_vif_priv *vp = (void *)vif->drv_priv; struct sk_buff *skb; struct ieee80211_hdr *hdr; + struct ieee80211_tx_info *cb;
if (!vp->assoc) return; @@ -931,6 +932,10 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac, memcpy(hdr->addr2, mac, ETH_ALEN); memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
+ cb = IEEE80211_SKB_CB(skb); + cb->control.rates[0].count = 1; + cb->control.rates[1].idx = -1; + rcu_read_lock(); mac80211_hwsim_tx_frame(data->hw, skb, rcu_dereference(vif->bss_conf.chanctx_conf)->def.chan); diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c index 3ac373d29d93..7362d4d5ea85 100644 --- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c +++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c @@ -956,30 +956,51 @@ static inline void wilc_wfi_cfg_parse_ch_attr(u8 *buf, u32 len, u8 sta_ch) return;
while (index + sizeof(*e) <= len) { + u16 attr_size; + e = (struct wilc_attr_entry *)&buf[index]; - if (e->attr_type == IEEE80211_P2P_ATTR_CHANNEL_LIST) + attr_size = le16_to_cpu(e->attr_len); + + if (index + sizeof(*e) + attr_size > len) + return; + + if (e->attr_type == IEEE80211_P2P_ATTR_CHANNEL_LIST && + attr_size >= (sizeof(struct wilc_attr_ch_list) - sizeof(*e))) ch_list_idx = index; - else if (e->attr_type == IEEE80211_P2P_ATTR_OPER_CHANNEL) + else if (e->attr_type == IEEE80211_P2P_ATTR_OPER_CHANNEL && + attr_size == (sizeof(struct wilc_attr_oper_ch) - sizeof(*e))) op_ch_idx = index; + if (ch_list_idx && op_ch_idx) break; - index += le16_to_cpu(e->attr_len) + sizeof(*e); + + index += sizeof(*e) + attr_size; }
if (ch_list_idx) { - u16 attr_size; - struct wilc_ch_list_elem *e; - int i; + u16 elem_size;
ch_list = (struct wilc_attr_ch_list *)&buf[ch_list_idx]; - attr_size = le16_to_cpu(ch_list->attr_len); - for (i = 0; i < attr_size;) { + /* the number of bytes following the final 'elem' member */ + elem_size = le16_to_cpu(ch_list->attr_len) - + (sizeof(*ch_list) - sizeof(struct wilc_attr_entry)); + for (unsigned int i = 0; i < elem_size;) { + struct wilc_ch_list_elem *e; + e = (struct wilc_ch_list_elem *)(ch_list->elem + i); + + i += sizeof(*e); + if (i > elem_size) + break; + + i += e->no_of_channels; + if (i > elem_size) + break; + if (e->op_class == WILC_WLAN_OPERATING_CLASS_2_4GHZ) { memset(e->ch_list, sta_ch, e->no_of_channels); break; } - i += e->no_of_channels; } }
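The rewritten wilc1000 parser above validates each attribute length against the remaining buffer before it is used. The same shape, reduced to a generic length-checked TLV walk over an untrusted buffer, is sketched below (the struct is a simplified stand-in, not the driver's real layout):

#include <linux/kernel.h>
#include <linux/types.h>

struct demo_tlv {			/* simplified stand-in for wilc_attr_entry */
	u8 type;
	__le16 len;
	u8 data[];
} __packed;

static void demo_walk_tlvs(const u8 *buf, size_t len)
{
	size_t index = 0;

	while (index + sizeof(struct demo_tlv) <= len) {
		const struct demo_tlv *e = (const void *)&buf[index];
		u16 elen = le16_to_cpu(e->len);

		/* Reject a length that would run past the buffer before
		 * touching e->data at all.
		 */
		if (index + sizeof(*e) + elen > len)
			return;

		/* ... dispatch on e->type, consume e->data[0..elen-1] ... */

		index += sizeof(*e) + elen;
	}
}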
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c index eb1d1ba3a443..67df8221b5ae 100644 --- a/drivers/net/wireless/microchip/wilc1000/hif.c +++ b/drivers/net/wireless/microchip/wilc1000/hif.c @@ -482,14 +482,25 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies->data, ies->len); if (rsn_ie) { + int rsn_ie_len = sizeof(struct element) + rsn_ie[1]; int offset = 8;
- param->mode_802_11i = 2; - param->rsn_found = true; /* extract RSN capabilities */ - offset += (rsn_ie[offset] * 4) + 2; - offset += (rsn_ie[offset] * 4) + 2; - memcpy(param->rsn_cap, &rsn_ie[offset], 2); + if (offset < rsn_ie_len) { + /* skip over pairwise suites */ + offset += (rsn_ie[offset] * 4) + 2; + + if (offset < rsn_ie_len) { + /* skip over authentication suites */ + offset += (rsn_ie[offset] * 4) + 2; + + if (offset + 1 < rsn_ie_len) { + param->mode_802_11i = 2; + param->rsn_found = true; + memcpy(param->rsn_cap, &rsn_ie[offset], 2); + } + } + } }
if (param->rsn_found) { diff --git a/drivers/net/wwan/iosm/iosm_ipc_coredump.c b/drivers/net/wwan/iosm/iosm_ipc_coredump.c index 9acd87724c9d..26ca30476f40 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_coredump.c +++ b/drivers/net/wwan/iosm/iosm_ipc_coredump.c @@ -2,6 +2,7 @@ /* * Copyright (C) 2020-2021 Intel Corporation. */ +#include <linux/vmalloc.h>
#include "iosm_ipc_coredump.h"
diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.c b/drivers/net/wwan/iosm/iosm_ipc_devlink.c index 17da85a8f337..2fe724d623c0 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_devlink.c +++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.c @@ -2,6 +2,7 @@ /* * Copyright (C) 2020-2021 Intel Corporation. */ +#include <linux/vmalloc.h>
#include "iosm_ipc_chnl_cfg.h" #include "iosm_ipc_coredump.h" diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c index 97cb6846c6ae..f604d4a01e1b 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_pcie.c +++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c @@ -249,7 +249,7 @@ static enum ipc_pcie_sleep_state ipc_pcie_read_bios_cfg(struct device *dev) if (object->integer.value == 3) sleep_state = IPC_PCIE_D3L2;
-	kfree(object);
+	ACPI_FREE(object);
default_ret: return sleep_state; diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.c b/drivers/net/wwan/t7xx/t7xx_modem_ops.c index 3458af31e864..7d0f5e4f0a78 100644 --- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c +++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c @@ -165,6 +165,8 @@ static int t7xx_acpi_reset(struct t7xx_pci_dev *t7xx_dev, char *fn_name) return -EFAULT; }
+ kfree(buffer.pointer); + #endif return 0; } diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c index 7764b1a4c3cf..ec87dd21e054 100644 --- a/drivers/nfc/st-nci/se.c +++ b/drivers/nfc/st-nci/se.c @@ -312,6 +312,8 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev, int r = 0; struct device *dev = &ndev->nfc_dev->dev; struct nfc_evt_transaction *transaction; + u32 aid_len; + u8 params_len;
pr_debug("connectivity gate event: %x\n", event);
@@ -325,26 +327,47 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev, * Description Tag Length * AID 81 5 to 16 * PARAMETERS 82 0 to 255 + * + * The key differences are aid storage length is variably sized + * in the packet, but fixed in nfc_evt_transaction, and that + * the aid_len is u8 in the packet, but u32 in the structure, + * and the tags in the packet are not included in + * nfc_evt_transaction. + * + * size(b): 1 1 5-16 1 1 0-255 + * offset: 0 1 2 aid_len + 2 aid_len + 3 aid_len + 4 + * mem name: aid_tag(M) aid_len aid params_tag(M) params_len params + * example: 0x81 5-16 X 0x82 0-255 X */ - if (skb->len < NFC_MIN_AID_LENGTH + 2 && - skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG) + if (skb->len < 2 || skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG) return -EPROTO;
- transaction = devm_kzalloc(dev, skb->len - 2, GFP_KERNEL); - if (!transaction) - return -ENOMEM; + aid_len = skb->data[1];
- transaction->aid_len = skb->data[1]; - memcpy(transaction->aid, &skb->data[2], transaction->aid_len); + if (skb->len < aid_len + 4 || + aid_len > sizeof(transaction->aid)) + return -EPROTO;
- /* Check next byte is PARAMETERS tag (82) */ - if (skb->data[transaction->aid_len + 2] != - NFC_EVT_TRANSACTION_PARAMS_TAG) + params_len = skb->data[aid_len + 3]; + + /* Verify PARAMETERS tag is (82), and final check that there is + * enough space in the packet to read everything. + */ + if (skb->data[aid_len + 2] != NFC_EVT_TRANSACTION_PARAMS_TAG || + skb->len < aid_len + 4 + params_len) return -EPROTO;
- transaction->params_len = skb->data[transaction->aid_len + 3]; - memcpy(transaction->params, skb->data + - transaction->aid_len + 4, transaction->params_len); + transaction = devm_kzalloc(dev, sizeof(*transaction) + + params_len, GFP_KERNEL); + if (!transaction) + return -ENOMEM; + + transaction->aid_len = aid_len; + transaction->params_len = params_len; + + memcpy(transaction->aid, &skb->data[2], aid_len); + memcpy(transaction->params, &skb->data[aid_len + 4], + params_len);
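To make the offsets in the new checks concrete, the same layout worked through for a hypothetical event with aid_len = 5 looks like this:

/* Hypothetical example with aid_len = 5 and the offsets used above:
 *
 *   skb->data[0]                 = 0x81  AID tag
 *   skb->data[1]                 = 5     aid_len
 *   skb->data[2..6]              = AID bytes
 *   skb->data[7]  (aid_len + 2)  = 0x82  PARAMETERS tag
 *   skb->data[8]  (aid_len + 3)  = params_len
 *   skb->data[9..] (aid_len + 4) = parameter bytes
 *
 * so the event is only well formed when skb->len >= aid_len + 4 + params_len,
 * which is exactly what the rewritten code checks before any copy is made.
 */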
r = nfc_se_transaction(ndev->nfc_dev, host, transaction); break; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index ed47c256dbd2..01c36284e542 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -675,6 +675,7 @@ void nvme_init_request(struct request *req, struct nvme_command *cmd) if (req->mq_hctx->type == HCTX_TYPE_POLL) req->cmd_flags |= REQ_POLLED; nvme_clear_nvme_request(req); + req->rq_flags |= RQF_QUIET; memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd)); } EXPORT_SYMBOL_GPL(nvme_init_request); @@ -1037,7 +1038,6 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, goto out; }
- req->rq_flags |= RQF_QUIET; ret = nvme_execute_rq(req, at_head); if (result && ret >= 0) *result = nvme_req(req)->result; @@ -1225,7 +1225,6 @@ static void nvme_keep_alive_work(struct work_struct *work) rq->timeout = ctrl->kato * HZ; rq->end_io = nvme_keep_alive_end_io; rq->end_io_data = ctrl; - rq->rq_flags |= RQF_QUIET; blk_execute_rq_nowait(rq, false); }
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 1a6423e94eb3..0f34114c4596 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1438,7 +1438,6 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
abort_req->end_io = abort_endio; abort_req->end_io_data = NULL; - abort_req->rq_flags |= RQF_QUIET; blk_execute_rq_nowait(abort_req, false);
/* @@ -2489,7 +2488,6 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) req->end_io_data = nvmeq;
init_completion(&nvmeq->delete_done); - req->rq_flags |= RQF_QUIET; blk_execute_rq_nowait(req, false); return 0; } diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index 7f52d9dac443..a79eadb953de 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -1215,6 +1215,7 @@ static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys, const char *page, size_t count) { int pos = 0, len; + char *val;
if (subsys->subsys_discovered) { pr_err("Can't set model number. %s is already assigned\n", @@ -1237,9 +1238,11 @@ static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys, return -EINVAL; }
- subsys->model_number = kmemdup_nul(page, len, GFP_KERNEL); - if (!subsys->model_number) + val = kmemdup_nul(page, len, GFP_KERNEL); + if (!val) return -ENOMEM; + kfree(subsys->model_number); + subsys->model_number = val; return count; }
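Beyond fixing the leak, the configfs hunk above is a tidy instance of the allocate-then-swap pattern: build the replacement first and only free the old value once allocation has succeeded, so a failed write leaves the attribute intact. A generic sketch (demo_obj and its field are hypothetical):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

struct demo_obj {			/* hypothetical object */
	char *name;
};

static ssize_t demo_store_name(struct demo_obj *obj, const char *page, size_t len)
{
	char *val;

	val = kmemdup_nul(page, len, GFP_KERNEL);
	if (!val)
		return -ENOMEM;		/* old obj->name untouched on failure */

	kfree(obj->name);		/* kfree(NULL) is a no-op */
	obj->name = val;
	return len;
}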
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index ba64284eaf9f..f1ec8931dfbc 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -1613,7 +1613,7 @@ static void hv_pci_compose_compl(void *context, struct pci_response *resp, }
static u32 hv_compose_msi_req_v1( - struct pci_create_interrupt *int_pkt, const struct cpumask *affinity, + struct pci_create_interrupt *int_pkt, u32 slot, u8 vector, u16 vector_count) { int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE; @@ -1631,6 +1631,35 @@ static u32 hv_compose_msi_req_v1( return sizeof(*int_pkt); }
+/* + * The vCPU selected by hv_compose_multi_msi_req_get_cpu() and + * hv_compose_msi_req_get_cpu() is a "dummy" vCPU because the final vCPU to be + * interrupted is specified later in hv_irq_unmask() and communicated to Hyper-V + * via the HVCALL_RETARGET_INTERRUPT hypercall. But the choice of dummy vCPU is + * not irrelevant because Hyper-V chooses the physical CPU to handle the + * interrupts based on the vCPU specified in message sent to the vPCI VSP in + * hv_compose_msi_msg(). Hyper-V's choice of pCPU is not visible to the guest, + * but assigning too many vPCI device interrupts to the same pCPU can cause a + * performance bottleneck. So we spread out the dummy vCPUs to influence Hyper-V + * to spread out the pCPUs that it selects. + * + * For the single-MSI and MSI-X cases, it's OK for hv_compose_msi_req_get_cpu() + * to always return the same dummy vCPU, because a second call to + * hv_compose_msi_msg() contains the "real" vCPU, causing Hyper-V to choose a + * new pCPU for the interrupt. But for the multi-MSI case, the second call to + * hv_compose_msi_msg() exits without sending a message to the vPCI VSP, so the + * original dummy vCPU is used. This dummy vCPU must be round-robin'ed so that + * the pCPUs are spread out. All interrupts for a multi-MSI device end up using + * the same pCPU, even though the vCPUs will be spread out by later calls + * to hv_irq_unmask(), but that is the best we can do now. + * + * With Hyper-V in Nov 2022, the HVCALL_RETARGET_INTERRUPT hypercall does *not* + * cause Hyper-V to reselect the pCPU based on the specified vCPU. Such an + * enhancement is planned for a future version. With that enhancement, the + * dummy vCPU selection won't matter, and interrupts for the same multi-MSI + * device will be spread across multiple pCPUs. + */ + /* * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten * by subsequent retarget in hv_irq_unmask(). @@ -1640,18 +1669,39 @@ static int hv_compose_msi_req_get_cpu(const struct cpumask *affinity) return cpumask_first_and(affinity, cpu_online_mask); }
-static u32 hv_compose_msi_req_v2( - struct pci_create_interrupt2 *int_pkt, const struct cpumask *affinity, - u32 slot, u8 vector, u16 vector_count) +/* + * Make sure the dummy vCPU values for multi-MSI don't all point to vCPU0. + */ +static int hv_compose_multi_msi_req_get_cpu(void) { + static DEFINE_SPINLOCK(multi_msi_cpu_lock); + + /* -1 means starting with CPU 0 */ + static int cpu_next = -1; + + unsigned long flags; int cpu;
+ spin_lock_irqsave(&multi_msi_cpu_lock, flags); + + cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask, nr_cpu_ids, + false); + cpu = cpu_next; + + spin_unlock_irqrestore(&multi_msi_cpu_lock, flags); + + return cpu; +} + +static u32 hv_compose_msi_req_v2( + struct pci_create_interrupt2 *int_pkt, int cpu, + u32 slot, u8 vector, u16 vector_count) +{ int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2; int_pkt->wslot.slot = slot; int_pkt->int_desc.vector = vector; int_pkt->int_desc.vector_count = vector_count; int_pkt->int_desc.delivery_mode = DELIVERY_MODE; - cpu = hv_compose_msi_req_get_cpu(affinity); int_pkt->int_desc.processor_array[0] = hv_cpu_number_to_vp_number(cpu); int_pkt->int_desc.processor_count = 1; @@ -1660,18 +1710,15 @@ static u32 hv_compose_msi_req_v2( }
static u32 hv_compose_msi_req_v3( - struct pci_create_interrupt3 *int_pkt, const struct cpumask *affinity, + struct pci_create_interrupt3 *int_pkt, int cpu, u32 slot, u32 vector, u16 vector_count) { - int cpu; - int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE3; int_pkt->wslot.slot = slot; int_pkt->int_desc.vector = vector; int_pkt->int_desc.reserved = 0; int_pkt->int_desc.vector_count = vector_count; int_pkt->int_desc.delivery_mode = DELIVERY_MODE; - cpu = hv_compose_msi_req_get_cpu(affinity); int_pkt->int_desc.processor_array[0] = hv_cpu_number_to_vp_number(cpu); int_pkt->int_desc.processor_count = 1; @@ -1715,12 +1762,18 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) struct pci_create_interrupt3 v3; } int_pkts; } __packed ctxt; + bool multi_msi; u64 trans_id; u32 size; int ret; + int cpu; + + msi_desc = irq_data_get_msi_desc(data); + multi_msi = !msi_desc->pci.msi_attrib.is_msix && + msi_desc->nvec_used > 1;
/* Reuse the previous allocation */ - if (data->chip_data) { + if (data->chip_data && multi_msi) { int_desc = data->chip_data; msg->address_hi = int_desc->address >> 32; msg->address_lo = int_desc->address & 0xffffffff; @@ -1728,7 +1781,6 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) return; }
- msi_desc = irq_data_get_msi_desc(data); pdev = msi_desc_to_pci_dev(msi_desc); dest = irq_data_get_effective_affinity_mask(data); pbus = pdev->bus; @@ -1738,11 +1790,18 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) if (!hpdev) goto return_null_message;
+ /* Free any previous message that might have already been composed. */ + if (data->chip_data && !multi_msi) { + int_desc = data->chip_data; + data->chip_data = NULL; + hv_int_desc_free(hpdev, int_desc); + } + int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC); if (!int_desc) goto drop_reference;
- if (!msi_desc->pci.msi_attrib.is_msix && msi_desc->nvec_used > 1) { + if (multi_msi) { /* * If this is not the first MSI of Multi MSI, we already have * a mapping. Can exit early. @@ -1767,9 +1826,11 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) */ vector = 32; vector_count = msi_desc->nvec_used; + cpu = hv_compose_multi_msi_req_get_cpu(); } else { vector = hv_msi_get_int_vector(data); vector_count = 1; + cpu = hv_compose_msi_req_get_cpu(dest); }
/* @@ -1785,7 +1846,6 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) switch (hbus->protocol_version) { case PCI_PROTOCOL_VERSION_1_1: size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1, - dest, hpdev->desc.win_slot.slot, (u8)vector, vector_count); @@ -1794,7 +1854,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) case PCI_PROTOCOL_VERSION_1_2: case PCI_PROTOCOL_VERSION_1_3: size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2, - dest, + cpu, hpdev->desc.win_slot.slot, (u8)vector, vector_count); @@ -1802,7 +1862,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
case PCI_PROTOCOL_VERSION_1_4: size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3, - dest, + cpu, hpdev->desc.win_slot.slot, vector, vector_count); diff --git a/drivers/pinctrl/qcom/pinctrl-sc8280xp.c b/drivers/pinctrl/qcom/pinctrl-sc8280xp.c index aa2075390f3e..e96c00686a25 100644 --- a/drivers/pinctrl/qcom/pinctrl-sc8280xp.c +++ b/drivers/pinctrl/qcom/pinctrl-sc8280xp.c @@ -1873,8 +1873,8 @@ static const struct msm_pingroup sc8280xp_groups[] = { [225] = PINGROUP(225, hs3_mi2s, phase_flag, _, _, _, _, egpio), [226] = PINGROUP(226, hs3_mi2s, phase_flag, _, _, _, _, egpio), [227] = PINGROUP(227, hs3_mi2s, phase_flag, _, _, _, _, egpio), - [228] = UFS_RESET(ufs_reset, 0xf1004), - [229] = UFS_RESET(ufs1_reset, 0xf3004), + [228] = UFS_RESET(ufs_reset, 0xf1000), + [229] = UFS_RESET(ufs1_reset, 0xf3000), [230] = SDC_QDSD_PINGROUP(sdc2_clk, 0xe8000, 14, 6), [231] = SDC_QDSD_PINGROUP(sdc2_cmd, 0xe8000, 11, 3), [232] = SDC_QDSD_PINGROUP(sdc2_data, 0xe8000, 9, 0), diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c index 585911020cea..023f126121d7 100644 --- a/drivers/platform/surface/surface_aggregator_registry.c +++ b/drivers/platform/surface/surface_aggregator_registry.c @@ -234,6 +234,19 @@ static const struct software_node *ssam_node_group_sl3[] = { NULL, };
+/* Devices for Surface Laptop 5. */ +static const struct software_node *ssam_node_group_sl5[] = { + &ssam_node_root, + &ssam_node_bat_ac, + &ssam_node_bat_main, + &ssam_node_tmp_pprof, + &ssam_node_hid_main_keyboard, + &ssam_node_hid_main_touchpad, + &ssam_node_hid_main_iid5, + &ssam_node_hid_sam_ucm_ucsi, + NULL, +}; + /* Devices for Surface Laptop Studio. */ static const struct software_node *ssam_node_group_sls[] = { &ssam_node_root, @@ -268,6 +281,7 @@ static const struct software_node *ssam_node_group_sp7[] = { NULL, };
+/* Devices for Surface Pro 8 */ static const struct software_node *ssam_node_group_sp8[] = { &ssam_node_root, &ssam_node_hub_kip, @@ -284,6 +298,23 @@ static const struct software_node *ssam_node_group_sp8[] = { NULL, };
+/* Devices for Surface Pro 9 */ +static const struct software_node *ssam_node_group_sp9[] = { + &ssam_node_root, + &ssam_node_hub_kip, + &ssam_node_bat_ac, + &ssam_node_bat_main, + &ssam_node_tmp_pprof, + /* TODO: Tablet mode switch (via POS subsystem) */ + &ssam_node_hid_kip_keyboard, + &ssam_node_hid_kip_penstash, + &ssam_node_hid_kip_touchpad, + &ssam_node_hid_kip_fwupd, + &ssam_node_hid_sam_sensors, + &ssam_node_hid_sam_ucm_ucsi, + NULL, +}; +
/* -- SSAM platform/meta-hub driver. ---------------------------------------- */
@@ -303,6 +334,9 @@ static const struct acpi_device_id ssam_platform_hub_match[] = { /* Surface Pro 8 */ { "MSHW0263", (unsigned long)ssam_node_group_sp8 },
+ /* Surface Pro 9 */ + { "MSHW0343", (unsigned long)ssam_node_group_sp9 }, + /* Surface Book 2 */ { "MSHW0107", (unsigned long)ssam_node_group_gen5 },
@@ -324,6 +358,9 @@ static const struct acpi_device_id ssam_platform_hub_match[] = { /* Surface Laptop 4 (13", Intel) */ { "MSHW0250", (unsigned long)ssam_node_group_sl3 },
+ /* Surface Laptop 5 */ + { "MSHW0350", (unsigned long)ssam_node_group_sl5 }, + /* Surface Laptop Go 1 */ { "MSHW0118", (unsigned long)ssam_node_group_slg1 },
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index f1259d81d86d..df4c1f08f0c6 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -564,6 +564,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = { }, .driver_data = (void *)ACER_CAP_KBD_DOCK, }, + { + .callback = set_force_caps, + .ident = "Acer Aspire Switch V 10 SW5-017", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SW5-017"), + }, + .driver_data = (void *)ACER_CAP_KBD_DOCK, + }, { .callback = set_force_caps, .ident = "Acer One 10 (S1003)", diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index eec7d0ed7cf2..8e1979b477a7 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -1656,6 +1656,8 @@ static void asus_wmi_set_xusb2pr(struct asus_wmi *asus) pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, cpu_to_le32(ports_available));
+ pci_dev_put(xhci_pdev); + pr_info("set USB_INTEL_XUSB2PR old: 0x%04x, new: 0x%04x\n", orig_ports_available, ports_available); } diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 4fbe91769c91..788381e4c6a6 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -90,6 +90,7 @@ enum hp_wmi_event_ids { HPWMI_PEAKSHIFT_PERIOD = 0x0F, HPWMI_BATTERY_CHARGE_PERIOD = 0x10, HPWMI_SANITIZATION_MODE = 0x17, + HPWMI_SMART_EXPERIENCE_APP = 0x21, };
/* @@ -857,6 +858,8 @@ static void hp_wmi_notify(u32 value, void *context) break; case HPWMI_SANITIZATION_MODE: break; + case HPWMI_SMART_EXPERIENCE_APP: + break; default: pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data); break; diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index abd0c81d62c4..3ea8fc6a9ca3 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -136,6 +136,7 @@ struct ideapad_private { bool dytc : 1; bool fan_mode : 1; bool fn_lock : 1; + bool set_fn_lock_led : 1; bool hw_rfkill_switch : 1; bool kbd_bl : 1; bool touchpad_ctrl_via_ec : 1; @@ -154,7 +155,21 @@ MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth.");
static bool allow_v4_dytc; module_param(allow_v4_dytc, bool, 0444); -MODULE_PARM_DESC(allow_v4_dytc, "Enable DYTC version 4 platform-profile support."); +MODULE_PARM_DESC(allow_v4_dytc, + "Enable DYTC version 4 platform-profile support. " + "If you need this please report this to: platform-driver-x86@vger.kernel.org"); + +static bool hw_rfkill_switch; +module_param(hw_rfkill_switch, bool, 0444); +MODULE_PARM_DESC(hw_rfkill_switch, + "Enable rfkill support for laptops with a hw on/off wifi switch/slider. " + "If you need this please report this to: platform-driver-x86@vger.kernel.org"); + +static bool set_fn_lock_led; +module_param(set_fn_lock_led, bool, 0444); +MODULE_PARM_DESC(set_fn_lock_led, + "Enable driver based updates of the fn-lock LED on fn-lock changes. " + "If you need this please report this to: platform-driver-x86@vger.kernel.org");
/* * ACPI Helpers @@ -1501,6 +1516,9 @@ static void ideapad_wmi_notify(u32 value, void *context) ideapad_input_report(priv, value); break; case 208: + if (!priv->features.set_fn_lock_led) + break; + if (!eval_hals(priv->adev->handle, &result)) { bool state = test_bit(HALS_FNLOCK_STATE_BIT, &result);
@@ -1514,6 +1532,18 @@ static void ideapad_wmi_notify(u32 value, void *context) } #endif
+/* On some models we need to call exec_sals(SALS_FNLOCK_ON/OFF) to set the LED */ +static const struct dmi_system_id set_fn_lock_led_list[] = { + { + /* https://bugzilla.kernel.org/show_bug.cgi?id=212671 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Legion R7000P2020H"), + } + }, + {} +}; + /* * Some ideapads have a hardware rfkill switch, but most do not have one. * Reading VPCCMD_R_RF always results in 0 on models without a hardware rfkill, @@ -1533,15 +1563,41 @@ static const struct dmi_system_id hw_rfkill_list[] = { {} };
+static const struct dmi_system_id no_touchpad_switch_list[] = { + { + .ident = "Lenovo Yoga 3 Pro 1370", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 3"), + }, + }, + { + .ident = "ZhaoYang K4e-IML", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ZhaoYang K4e-IML"), + }, + }, + {} +}; + static void ideapad_check_features(struct ideapad_private *priv) { acpi_handle handle = priv->adev->handle; unsigned long val;
- priv->features.hw_rfkill_switch = dmi_check_system(hw_rfkill_list); + priv->features.set_fn_lock_led = + set_fn_lock_led || dmi_check_system(set_fn_lock_led_list); + priv->features.hw_rfkill_switch = + hw_rfkill_switch || dmi_check_system(hw_rfkill_list);
/* Most ideapads with ELAN0634 touchpad don't use EC touchpad switch */ - priv->features.touchpad_ctrl_via_ec = !acpi_dev_present("ELAN0634", NULL, -1); + if (acpi_dev_present("ELAN0634", NULL, -1)) + priv->features.touchpad_ctrl_via_ec = 0; + else if (dmi_check_system(no_touchpad_switch_list)) + priv->features.touchpad_ctrl_via_ec = 0; + else + priv->features.touchpad_ctrl_via_ec = 1;
if (!read_ec_data(handle, VPCCMD_R_FAN, &val)) priv->features.fan_mode = true; diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c index 79cff1fc675c..b6313ecd190c 100644 --- a/drivers/platform/x86/intel/hid.c +++ b/drivers/platform/x86/intel/hid.c @@ -27,6 +27,9 @@ static const struct acpi_device_id intel_hid_ids[] = { {"INTC1051", 0}, {"INTC1054", 0}, {"INTC1070", 0}, + {"INTC1076", 0}, + {"INTC1077", 0}, + {"INTC1078", 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, intel_hid_ids); diff --git a/drivers/platform/x86/intel/pmt/class.c b/drivers/platform/x86/intel/pmt/class.c index 53d7fd2943b4..46598dcb634a 100644 --- a/drivers/platform/x86/intel/pmt/class.c +++ b/drivers/platform/x86/intel/pmt/class.c @@ -9,6 +9,7 @@ */
#include <linux/kernel.h> +#include <linux/io-64-nonatomic-lo-hi.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/pci.h> @@ -19,6 +20,7 @@ #define PMT_XA_START 0 #define PMT_XA_MAX INT_MAX #define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX) +#define GUID_SPR_PUNIT 0x9956f43f
bool intel_pmt_is_early_client_hw(struct device *dev) { @@ -33,6 +35,29 @@ bool intel_pmt_is_early_client_hw(struct device *dev) } EXPORT_SYMBOL_GPL(intel_pmt_is_early_client_hw);
+static inline int +pmt_memcpy64_fromio(void *to, const u64 __iomem *from, size_t count) +{ + int i, remain; + u64 *buf = to; + + if (!IS_ALIGNED((unsigned long)from, 8)) + return -EFAULT; + + for (i = 0; i < count/8; i++) + buf[i] = readq(&from[i]); + + /* Copy any remaining bytes */ + remain = count % 8; + if (remain) { + u64 tmp = readq(&from[i]); + + memcpy(&buf[i], &tmp, remain); + } + + return count; +} + /* * sysfs */ @@ -54,7 +79,11 @@ intel_pmt_read(struct file *filp, struct kobject *kobj, if (count > entry->size - off) count = entry->size - off;
- memcpy_fromio(buf, entry->base + off, count); + if (entry->guid == GUID_SPR_PUNIT) + /* PUNIT on SPR only supports aligned 64-bit read */ + count = pmt_memcpy64_fromio(buf, entry->base + off, count); + else + memcpy_fromio(buf, entry->base + off, count);
return count; } diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 353507d18e11..67dc335fca0c 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -4497,6 +4497,14 @@ static const struct dmi_system_id fwbug_list[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "21A0"), } }, + { + .ident = "P14s Gen2 AMD", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21A1"), + } + }, {} };
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c index bc97bfa8e8a6..baae3120efd0 100644 --- a/drivers/platform/x86/touchscreen_dmi.c +++ b/drivers/platform/x86/touchscreen_dmi.c @@ -770,6 +770,22 @@ static const struct ts_dmi_data predia_basic_data = { .properties = predia_basic_props, };
+static const struct property_entry rca_cambio_w101_v2_props[] = { + PROPERTY_ENTRY_U32("touchscreen-min-x", 4), + PROPERTY_ENTRY_U32("touchscreen-min-y", 20), + PROPERTY_ENTRY_U32("touchscreen-size-x", 1644), + PROPERTY_ENTRY_U32("touchscreen-size-y", 874), + PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rca-cambio-w101-v2.fw"), + PROPERTY_ENTRY_U32("silead,max-fingers", 10), + { } +}; + +static const struct ts_dmi_data rca_cambio_w101_v2_data = { + .acpi_name = "MSSL1680:00", + .properties = rca_cambio_w101_v2_props, +}; + static const struct property_entry rwc_nanote_p8_props[] = { PROPERTY_ENTRY_U32("touchscreen-min-y", 46), PROPERTY_ENTRY_U32("touchscreen-size-x", 1728), @@ -1409,6 +1425,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = { DMI_EXACT_MATCH(DMI_BOARD_NAME, "0E57"), }, }, + { + /* RCA Cambio W101 v2 */ + /* https://github.com/onitake/gsl-firmware/discussions/193 */ + .driver_data = (void *)&rca_cambio_w101_v2_data, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "RCA"), + DMI_MATCH(DMI_PRODUCT_NAME, "W101SA23T1"), + }, + }, { /* RWC NANOTE P8 */ .driver_data = (void *)&rwc_nanote_p8_data, diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c index 863fabe05bdc..307ee6f71042 100644 --- a/drivers/power/supply/ab8500_btemp.c +++ b/drivers/power/supply/ab8500_btemp.c @@ -725,7 +725,14 @@ static int ab8500_btemp_probe(struct platform_device *pdev) /* Get thermal zone and ADC */ di->tz = thermal_zone_get_zone_by_name("battery-thermal"); if (IS_ERR(di->tz)) { - return dev_err_probe(dev, PTR_ERR(di->tz), + ret = PTR_ERR(di->tz); + /* + * This usually just means we are probing before the thermal + * zone, so just defer. + */ + if (ret == -ENODEV) + ret = -EPROBE_DEFER; + return dev_err_probe(dev, ret, "failed to get battery thermal zone\n"); } di->bat_ctrl = devm_iio_channel_get(dev, "bat_ctrl"); diff --git a/drivers/power/supply/ip5xxx_power.c b/drivers/power/supply/ip5xxx_power.c index 218e8e689a3f..00221e9c0bfc 100644 --- a/drivers/power/supply/ip5xxx_power.c +++ b/drivers/power/supply/ip5xxx_power.c @@ -352,7 +352,7 @@ static int ip5xxx_battery_get_property(struct power_supply *psy, ret = ip5xxx_battery_read_adc(ip5xxx, IP5XXX_BATIADC_DAT0, IP5XXX_BATIADC_DAT1, &raw);
- val->intval = DIV_ROUND_CLOSEST(raw * 745985, 1000); + val->intval = DIV_ROUND_CLOSEST(raw * 149197, 200); return 0;
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index c3871565fd7d..c0f368f1b49f 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -5138,6 +5138,7 @@ static void regulator_dev_release(struct device *dev) { struct regulator_dev *rdev = dev_get_drvdata(dev);
+ debugfs_remove_recursive(rdev->debugfs); kfree(rdev->constraints); of_node_put(rdev->dev.of_node); kfree(rdev); @@ -5616,11 +5617,15 @@ regulator_register(const struct regulator_desc *regulator_desc, mutex_lock(®ulator_list_mutex); regulator_ena_gpio_free(rdev); mutex_unlock(®ulator_list_mutex); + put_device(&rdev->dev); + rdev = NULL; clean: if (dangling_of_gpiod) gpiod_put(config->ena_gpiod); + if (rdev && rdev->dev.of_node) + of_node_put(rdev->dev.of_node); + kfree(rdev); kfree(config); - put_device(&rdev->dev); rinse: if (dangling_cfg_gpiod) gpiod_put(cfg->ena_gpiod); @@ -5649,7 +5654,6 @@ void regulator_unregister(struct regulator_dev *rdev)
mutex_lock(®ulator_list_mutex);
- debugfs_remove_recursive(rdev->debugfs); WARN_ON(rdev->open_count); regulator_remove_coupling(rdev); unset_regulator_supplies(rdev); diff --git a/drivers/regulator/rt5759-regulator.c b/drivers/regulator/rt5759-regulator.c index 6b96899eb27e..8488417f4b2c 100644 --- a/drivers/regulator/rt5759-regulator.c +++ b/drivers/regulator/rt5759-regulator.c @@ -243,6 +243,7 @@ static int rt5759_regulator_register(struct rt5759_priv *priv) if (priv->chip_type == CHIP_TYPE_RT5759A) reg_desc->uV_step = RT5759A_STEP_UV;
+ memset(®_cfg, 0, sizeof(reg_cfg)); reg_cfg.dev = priv->dev; reg_cfg.of_node = np; reg_cfg.init_data = of_get_regulator_init_data(priv->dev, np, reg_desc); diff --git a/drivers/regulator/twl6030-regulator.c b/drivers/regulator/twl6030-regulator.c index 430265c404d6..7c7e3648ea4b 100644 --- a/drivers/regulator/twl6030-regulator.c +++ b/drivers/regulator/twl6030-regulator.c @@ -530,6 +530,7 @@ static const struct twlreg_info TWL6030_INFO_##label = { \ #define TWL6032_ADJUSTABLE_LDO(label, offset) \ static const struct twlreg_info TWL6032_INFO_##label = { \ .base = offset, \ + .features = TWL6032_SUBCLASS, \ .desc = { \ .name = #label, \ .id = TWL6032_REG_##label, \ @@ -562,6 +563,7 @@ static const struct twlreg_info TWLFIXED_INFO_##label = { \ #define TWL6032_ADJUSTABLE_SMPS(label, offset) \ static const struct twlreg_info TWLSMPS_INFO_##label = { \ .base = offset, \ + .features = TWL6032_SUBCLASS, \ .desc = { \ .name = #label, \ .id = TWL6032_REG_##label, \ diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 3cc93e2e4e15..2dec81e7e6ab 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -4681,7 +4681,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev, struct dasd_device *basedev; struct req_iterator iter; struct dasd_ccw_req *cqr; - unsigned int first_offs; unsigned int trkcount; unsigned long *idaws; unsigned int size; @@ -4715,7 +4714,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev, last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) / DASD_RAW_SECTORS_PER_TRACK; trkcount = last_trk - first_trk + 1; - first_offs = 0;
if (rq_data_dir(req) == READ) cmd = DASD_ECKD_CCW_READ_TRACK; @@ -4759,13 +4757,13 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
if (use_prefix) { prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev, - startdev, 1, first_offs + 1, trkcount, 0, 0); + startdev, 1, 0, trkcount, 0, 0); } else { define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0); ccw[-1].flags |= CCW_FLAG_CC;
data += sizeof(struct DE_eckd_data); - locate_record_ext(ccw++, data, first_trk, first_offs + 1, + locate_record_ext(ccw++, data, first_trk, 0, trkcount, cmd, basedev, 0, 0); }
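The regulator core hunk above, like the OP-TEE and iSCSI transport changes further down, follows the same rule: once a structure has been handed to the device core, a failed device_register() must be unwound with put_device() so the release callback does the freeing, and a bare kfree() is only allowed on the early path where that has not happened yet. A minimal userspace analogue of that refcounting discipline, with a hand-rolled counter instead of struct kref, is sketched below; the names are illustrative.

/* Userspace analogue of the "always put_device() after device_register(),
 * even on failure" rule: once an object is owned by the refcounting core,
 * only its release callback may free it. */
#include <stdio.h>
#include <stdlib.h>

struct fake_device {
    int refcount;
    void (*release)(struct fake_device *dev);
};

static void fake_device_put(struct fake_device *dev)
{
    if (--dev->refcount == 0)
        dev->release(dev);
}

static void my_release(struct fake_device *dev)
{
    printf("release callback frees %p\n", (void *)dev);
    free(dev);
}

/* Stand-in for device_register(): takes one reference and may fail. */
static int fake_device_register(struct fake_device *dev, int simulate_error)
{
    dev->refcount = 1;
    return simulate_error ? -1 : 0;
}

int main(void)
{
    struct fake_device *dev = calloc(1, sizeof(*dev));

    if (!dev)
        return 1;
    dev->release = my_release;

    if (fake_device_register(dev, 1)) {
        /* Wrong: free(dev) here would bypass the release callback.
         * Right: drop our reference and let the release run. */
        fake_device_put(dev);
        return 1;
    }

    fake_device_put(dev);           /* normal teardown */
    return 0;
}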
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 59ac98f2bd27..b02c631f3b71 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -233,8 +233,11 @@ static void __init ap_init_qci_info(void) if (!ap_qci_info) return; ap_qci_info_old = kzalloc(sizeof(*ap_qci_info_old), GFP_KERNEL); - if (!ap_qci_info_old) + if (!ap_qci_info_old) { + kfree(ap_qci_info); + ap_qci_info = NULL; return; + } if (ap_fetch_qci_info(ap_qci_info) != 0) { kfree(ap_qci_info); kfree(ap_qci_info_old); diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c index 8fb34b8eeb18..5ad251477593 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.c +++ b/drivers/s390/crypto/zcrypt_msgtype6.c @@ -342,7 +342,10 @@ static int xcrb_msg_to_type6cprb_msgx(bool userspace, struct ap_message *ap_msg, }; struct { struct type6_hdr hdr; - struct CPRBX cprbx; + union { + struct CPRBX cprbx; + DECLARE_FLEX_ARRAY(u8, userdata); + }; } __packed * msg = ap_msg->msg;
int rcblen = CEIL4(xcrb->request_control_blk_length); @@ -403,7 +406,8 @@ static int xcrb_msg_to_type6cprb_msgx(bool userspace, struct ap_message *ap_msg, msg->hdr.fromcardlen2 = xcrb->reply_data_length;
/* prepare CPRB */ - if (z_copy_from_user(userspace, &msg->cprbx, xcrb->request_control_blk_addr, + if (z_copy_from_user(userspace, msg->userdata, + xcrb->request_control_blk_addr, xcrb->request_control_blk_length)) return -EFAULT; if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) > @@ -469,9 +473,14 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(bool userspace, struct ap_message *ap
struct { struct type6_hdr hdr; - struct ep11_cprb cprbx; - unsigned char pld_tag; /* fixed value 0x30 */ - unsigned char pld_lenfmt; /* payload length format */ + union { + struct { + struct ep11_cprb cprbx; + unsigned char pld_tag; /* fixed value 0x30 */ + unsigned char pld_lenfmt; /* length format */ + } __packed; + DECLARE_FLEX_ARRAY(u8, userdata); + }; } __packed * msg = ap_msg->msg;
struct pld_hdr { @@ -500,7 +509,7 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(bool userspace, struct ap_message *ap msg->hdr.fromcardlen1 = xcrb->resp_len;
/* Import CPRB data from the ioctl input parameter */ - if (z_copy_from_user(userspace, &msg->cprbx.cprb_len, + if (z_copy_from_user(userspace, msg->userdata, (char __force __user *)xcrb->req, xcrb->req_len)) { return -EFAULT; } diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 00684e11976b..1a0c0b7289d2 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -708,8 +708,13 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost) memset(vhost->async_crq.msgs.async, 0, PAGE_SIZE); vhost->async_crq.cur = 0;
- list_for_each_entry(tgt, &vhost->targets, queue) - ibmvfc_del_tgt(tgt); + list_for_each_entry(tgt, &vhost->targets, queue) { + if (vhost->client_migrated) + tgt->need_login = 1; + else + ibmvfc_del_tgt(tgt); + } + scsi_block_requests(vhost->host); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); vhost->job_step = ibmvfc_npiv_login; @@ -3235,9 +3240,12 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost, /* We need to re-setup the interpartition connection */ dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n"); vhost->client_migrated = 1; + + scsi_block_requests(vhost->host); ibmvfc_purge_requests(vhost, DID_REQUEUE); - ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); + ibmvfc_set_host_state(vhost, IBMVFC_LINK_DOWN); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE); + wake_up(&vhost->work_wait_q); } else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) { dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format); ibmvfc_purge_requests(vhost, DID_ERROR); diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c index bfa1165e23b6..1b4d1e562de8 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_os.c +++ b/drivers/scsi/mpi3mr/mpi3mr_os.c @@ -2930,7 +2930,8 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc, }
if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) && - (scmd->cmnd[0] != ATA_16)) { + (scmd->cmnd[0] != ATA_16) && + mrioc->logging_level & MPI3_DEBUG_SCSI_ERROR) { ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__, scmd->result); scsi_print_command(scmd); diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 95f940f5c996..7346098c1c68 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -1899,6 +1899,13 @@ static int resp_readcap16(struct scsi_cmnd *scp, arr[14] |= 0x40; }
+ /* + * Since the scsi_debug READ CAPACITY implementation always reports the + * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices. + */ + if (devip->zmodel == BLK_ZONED_HM) + arr[12] |= 1 << 4; + arr[15] = sdebug_lowest_aligned & 0xff;
if (have_dif_prot) { diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index cd3db9684e52..f473c002fa4d 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -231,7 +231,7 @@ iscsi_create_endpoint(int dd_size) dev_set_name(&ep->dev, "ep-%d", id); err = device_register(&ep->dev); if (err) - goto free_id; + goto put_dev;
err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group); if (err) @@ -245,10 +245,12 @@ iscsi_create_endpoint(int dd_size) device_unregister(&ep->dev); return NULL;
-free_id: +put_dev: mutex_lock(&iscsi_ep_idr_mutex); idr_remove(&iscsi_ep_idr, id); mutex_unlock(&iscsi_ep_idr_mutex); + put_device(&ep->dev); + return NULL; free_ep: kfree(ep); return NULL; @@ -766,7 +768,7 @@ iscsi_create_iface(struct Scsi_Host *shost, struct iscsi_transport *transport,
err = device_register(&iface->dev); if (err) - goto free_iface; + goto put_dev;
err = sysfs_create_group(&iface->dev.kobj, &iscsi_iface_group); if (err) @@ -780,9 +782,8 @@ iscsi_create_iface(struct Scsi_Host *shost, struct iscsi_transport *transport, device_unregister(&iface->dev); return NULL;
-free_iface: - put_device(iface->dev.parent); - kfree(iface); +put_dev: + put_device(&iface->dev); return NULL; } EXPORT_SYMBOL_GPL(iscsi_create_iface); @@ -1251,15 +1252,15 @@ iscsi_create_flashnode_sess(struct Scsi_Host *shost, int index,
err = device_register(&fnode_sess->dev); if (err) - goto free_fnode_sess; + goto put_dev;
if (dd_size) fnode_sess->dd_data = &fnode_sess[1];
return fnode_sess;
-free_fnode_sess: - kfree(fnode_sess); +put_dev: + put_device(&fnode_sess->dev); return NULL; } EXPORT_SYMBOL_GPL(iscsi_create_flashnode_sess); @@ -1299,15 +1300,15 @@ iscsi_create_flashnode_conn(struct Scsi_Host *shost,
err = device_register(&fnode_conn->dev); if (err) - goto free_fnode_conn; + goto put_dev;
if (dd_size) fnode_conn->dd_data = &fnode_conn[1];
return fnode_conn;
-free_fnode_conn: - kfree(fnode_conn); +put_dev: + put_device(&fnode_conn->dev); return NULL; } EXPORT_SYMBOL_GPL(iscsi_create_flashnode_conn); @@ -4815,7 +4816,7 @@ iscsi_register_transport(struct iscsi_transport *tt) dev_set_name(&priv->dev, "%s", tt->name); err = device_register(&priv->dev); if (err) - goto free_priv; + goto put_dev;
err = sysfs_create_group(&priv->dev.kobj, &iscsi_transport_group); if (err) @@ -4850,8 +4851,8 @@ iscsi_register_transport(struct iscsi_transport *tt) unregister_dev: device_unregister(&priv->dev); return NULL; -free_priv: - kfree(priv); +put_dev: + put_device(&priv->dev); return NULL; } EXPORT_SYMBOL_GPL(iscsi_register_transport); diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 8ced292c4b96..d93604318ecd 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -300,16 +300,21 @@ enum storvsc_request_type { };
/* - * SRB status codes and masks; a subset of the codes used here. + * SRB status codes and masks. In the 8-bit field, the two high order bits + * are flags, while the remaining 6 bits are an integer status code. The + * definitions here include only the subset of the integer status codes that + * are tested for in this driver. */ - #define SRB_STATUS_AUTOSENSE_VALID 0x80 #define SRB_STATUS_QUEUE_FROZEN 0x40 -#define SRB_STATUS_INVALID_LUN 0x20 -#define SRB_STATUS_SUCCESS 0x01 -#define SRB_STATUS_ABORTED 0x02 -#define SRB_STATUS_ERROR 0x04 -#define SRB_STATUS_DATA_OVERRUN 0x12 + +/* SRB status integer codes */ +#define SRB_STATUS_SUCCESS 0x01 +#define SRB_STATUS_ABORTED 0x02 +#define SRB_STATUS_ERROR 0x04 +#define SRB_STATUS_INVALID_REQUEST 0x06 +#define SRB_STATUS_DATA_OVERRUN 0x12 +#define SRB_STATUS_INVALID_LUN 0x20
#define SRB_STATUS(status) \ (status & ~(SRB_STATUS_AUTOSENSE_VALID | SRB_STATUS_QUEUE_FROZEN)) @@ -966,38 +971,25 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb, void (*process_err_fn)(struct work_struct *work); struct hv_host_device *host_dev = shost_priv(host);
- /* - * In some situations, Hyper-V sets multiple bits in the - * srb_status, such as ABORTED and ERROR. So process them - * individually, with the most specific bits first. - */ - - if (vm_srb->srb_status & SRB_STATUS_INVALID_LUN) { - set_host_byte(scmnd, DID_NO_CONNECT); - process_err_fn = storvsc_remove_lun; - goto do_work; - } + switch (SRB_STATUS(vm_srb->srb_status)) { + case SRB_STATUS_ERROR: + case SRB_STATUS_ABORTED: + case SRB_STATUS_INVALID_REQUEST: + if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID) { + /* Check for capacity change */ + if ((asc == 0x2a) && (ascq == 0x9)) { + process_err_fn = storvsc_device_scan; + /* Retry the I/O that triggered this. */ + set_host_byte(scmnd, DID_REQUEUE); + goto do_work; + }
- if (vm_srb->srb_status & SRB_STATUS_ABORTED) { - if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID && - /* Capacity data has changed */ - (asc == 0x2a) && (ascq == 0x9)) { - process_err_fn = storvsc_device_scan; /* - * Retry the I/O that triggered this. + * Otherwise, let upper layer deal with the + * error when sense message is present */ - set_host_byte(scmnd, DID_REQUEUE); - goto do_work; - } - } - - if (vm_srb->srb_status & SRB_STATUS_ERROR) { - /* - * Let upper layer deal with error when - * sense message is present. - */ - if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID) return; + }
/* * If there is an error; offline the device since all @@ -1020,6 +1012,13 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb, default: set_host_byte(scmnd, DID_ERROR); } + return; + + case SRB_STATUS_INVALID_LUN: + set_host_byte(scmnd, DID_NO_CONNECT); + process_err_fn = storvsc_remove_lun; + goto do_work; + } return;
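The storvsc hunk above stops testing srb_status bit by bit and instead strips the AUTOSENSE_VALID and QUEUE_FROZEN flag bits, then dispatches on the remaining 6-bit integer code. The values and the SRB_STATUS() mask below are taken straight from the diff; the little standalone program only demonstrates the masking and dispatch, it is not the driver code.

/* Demonstrates the SRB status handling from the storvsc hunk above:
 * strip the two flag bits, then switch on the 6-bit integer code. */
#include <stdio.h>

#define SRB_STATUS_AUTOSENSE_VALID  0x80
#define SRB_STATUS_QUEUE_FROZEN     0x40

#define SRB_STATUS_SUCCESS          0x01
#define SRB_STATUS_ABORTED          0x02
#define SRB_STATUS_ERROR            0x04
#define SRB_STATUS_INVALID_REQUEST  0x06
#define SRB_STATUS_DATA_OVERRUN     0x12
#define SRB_STATUS_INVALID_LUN      0x20

#define SRB_STATUS(status) \
    ((status) & ~(SRB_STATUS_AUTOSENSE_VALID | SRB_STATUS_QUEUE_FROZEN))

static const char *classify(unsigned char srb_status)
{
    switch (SRB_STATUS(srb_status)) {
    case SRB_STATUS_ERROR:
    case SRB_STATUS_ABORTED:
    case SRB_STATUS_INVALID_REQUEST:
        if (srb_status & SRB_STATUS_AUTOSENSE_VALID)
            return "error, sense data valid";
        return "error, no sense data";
    case SRB_STATUS_INVALID_LUN:
        return "invalid LUN";
    case SRB_STATUS_SUCCESS:
        return "success";
    default:
        return "other";
    }
}

int main(void)
{
    /* 0x84 = ERROR with AUTOSENSE_VALID set; 0x46 = QUEUE_FROZEN plus the
     * INVALID_REQUEST integer code; 0x52 = QUEUE_FROZEN plus DATA_OVERRUN. */
    unsigned char samples[] = { 0x01, 0x84, 0x20, 0x46, 0x52 };

    for (unsigned int i = 0; i < sizeof(samples); i++)
        printf("0x%02x -> %s\n", samples[i], classify(samples[i]));
    return 0;
}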
diff --git a/drivers/spi/spi-dw-dma.c b/drivers/spi/spi-dw-dma.c index 1322b8cce5b7..ababb910b391 100644 --- a/drivers/spi/spi-dw-dma.c +++ b/drivers/spi/spi-dw-dma.c @@ -128,12 +128,15 @@ static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
dw_spi_dma_sg_burst_init(dws);
+ pci_dev_put(dma_dev); + return 0;
free_rxchan: dma_release_channel(dws->rxchan); dws->rxchan = NULL; err_exit: + pci_dev_put(dma_dev); return -EBUSY; }
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 30d82cc7300b..d209930069cf 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -444,8 +444,7 @@ static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx, unsigned int pre, post; unsigned int fin = spi_imx->spi_clk;
- if (unlikely(fspi > fin)) - return 0; + fspi = min(fspi, fin);
post = fls(fin) - fls(fspi); if (fin > fspi << post) @@ -1607,6 +1606,13 @@ static int spi_imx_transfer_one(struct spi_controller *controller, if (spi_imx->slave_mode) return spi_imx_pio_transfer_slave(spi, transfer);
+ /* + * If we decided in spi_imx_can_dma() that we want to do a DMA + * transfer, the SPI transfer has already been mapped, so we + * have to do the DMA transfer here. + */ + if (spi_imx->usedma) + return spi_imx_dma_transfer(spi_imx, transfer); /* * Calculate the estimated time in us the transfer runs. Find * the number of Hz per byte per polling limit. @@ -1618,9 +1624,6 @@ static int spi_imx_transfer_one(struct spi_controller *controller, if (transfer->len < byte_limit) return spi_imx_poll_transfer(spi, transfer);
- if (spi_imx->usedma) - return spi_imx_dma_transfer(spi_imx, transfer); - return spi_imx_pio_transfer(spi, transfer); }
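The spi-imx clock-divider hunk above no longer rejects a requested rate above the input clock; it clamps the request and carries on with the fls()-based power-of-two estimate shown in the diff. The sketch below reproduces only the clamp and the "post = fls(fin) - fls(fspi)" step that is visible in the hunk; the driver's later adjustment of post and the pre-divider are not reproduced here.

/* Illustrates the clamp in the spi-imx clock-divider hunk above: instead of
 * bailing out when the requested rate exceeds the input clock, clamp it and
 * take the fls()-based divider estimate. Later fixups are omitted. */
#include <stdio.h>

static unsigned int fls_u32(unsigned int x)
{
    return x ? 32 - (unsigned int)__builtin_clz(x) : 0;  /* like fls() */
}

static unsigned int min_u32(unsigned int a, unsigned int b)
{
    return a < b ? a : b;
}

static void show(unsigned int fin, unsigned int fspi)
{
    unsigned int post;

    fspi = min_u32(fspi, fin);          /* was: return 0 if fspi > fin */
    post = fls_u32(fin) - fls_u32(fspi);
    printf("fin=%u fspi(clamped)=%u post=%u (fin > fspi<<post: %s)\n",
           fin, fspi, post, fin > (fspi << post) ? "yes" : "no");
}

int main(void)
{
    show(60000000, 20000000);   /* typical: divide down */
    show(60000000, 60000000);   /* boundary: post = 0 */
    show(60000000, 80000000);   /* request above fin: clamped, not rejected */
    return 0;
}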
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index 3c2fa2e2f94a..def09cf0dc14 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -434,7 +434,7 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz, u32 div, mbrdiv;
/* Ensure spi->clk_rate is even */ - div = DIV_ROUND_UP(spi->clk_rate & ~0x1, speed_hz); + div = DIV_ROUND_CLOSEST(spi->clk_rate & ~0x1, speed_hz);
/* * SPI framework set xfer->speed_hz to master->max_speed_hz if diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c index 904972606bd4..9f356612ba7e 100644 --- a/drivers/spi/spi-tegra210-quad.c +++ b/drivers/spi/spi-tegra210-quad.c @@ -720,6 +720,9 @@ static int tegra_qspi_start_cpu_based_transfer(struct tegra_qspi *qspi, struct s
static void tegra_qspi_deinit_dma(struct tegra_qspi *tqspi) { + if (!tqspi->soc_data->has_dma) + return; + if (tqspi->tx_dma_buf) { dma_free_coherent(tqspi->dev, tqspi->dma_buf_size, tqspi->tx_dma_buf, tqspi->tx_dma_phys); @@ -750,6 +753,9 @@ static int tegra_qspi_init_dma(struct tegra_qspi *tqspi) u32 *dma_buf; int err;
+ if (!tqspi->soc_data->has_dma) + return 0; + dma_chan = dma_request_chan(tqspi->dev, "rx"); if (IS_ERR(dma_chan)) { err = PTR_ERR(dma_chan); @@ -918,8 +924,9 @@ static int tegra_qspi_start_transfer_one(struct spi_device *spi, static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_device *spi) { struct tegra_qspi_client_data *cdata; + struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
- cdata = devm_kzalloc(&spi->dev, sizeof(*cdata), GFP_KERNEL); + cdata = devm_kzalloc(tqspi->dev, sizeof(*cdata), GFP_KERNEL); if (!cdata) return NULL;
diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c index f3947be13e2e..64f0e047c23d 100644 --- a/drivers/tee/optee/device.c +++ b/drivers/tee/optee/device.c @@ -80,7 +80,7 @@ static int optee_register_device(const uuid_t *device_uuid) rc = device_register(&optee_device->dev); if (rc) { pr_err("device registration failed, err: %d\n", rc); - kfree(optee_device); + put_device(&optee_device->dev); }
return rc; diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 2a0de70e0be4..ae02aed6bd0c 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -248,7 +248,7 @@ struct gsm_mux { bool constipated; /* Asked by remote to shut up */ bool has_devices; /* Devices were registered */
- struct mutex tx_mutex; + spinlock_t tx_lock; unsigned int tx_bytes; /* TX data outstanding */ #define TX_THRESH_HI 8192 #define TX_THRESH_LO 2048 @@ -256,7 +256,7 @@ struct gsm_mux { struct list_head tx_data_list; /* Pending data packets */
/* Control messages */ - struct delayed_work kick_timeout; /* Kick TX queuing on timeout */ + struct timer_list kick_timer; /* Kick TX queuing on timeout */ struct timer_list t2_timer; /* Retransmit timer for commands */ int cretries; /* Command retry counter */ struct gsm_control *pending_cmd;/* Our current pending command */ @@ -680,6 +680,7 @@ static int gsm_send(struct gsm_mux *gsm, int addr, int cr, int control) struct gsm_msg *msg; u8 *dp; int ocr; + unsigned long flags;
msg = gsm_data_alloc(gsm, addr, 0, control); if (!msg) @@ -701,10 +702,10 @@ static int gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
gsm_print_packet("Q->", addr, cr, control, NULL, 0);
- mutex_lock(&gsm->tx_mutex); + spin_lock_irqsave(&gsm->tx_lock, flags); list_add_tail(&msg->list, &gsm->tx_ctrl_list); gsm->tx_bytes += msg->len; - mutex_unlock(&gsm->tx_mutex); + spin_unlock_irqrestore(&gsm->tx_lock, flags); gsmld_write_trigger(gsm);
return 0; @@ -729,7 +730,7 @@ static void gsm_dlci_clear_queues(struct gsm_mux *gsm, struct gsm_dlci *dlci) spin_unlock_irqrestore(&dlci->lock, flags);
/* Clear data packets in MUX write queue */ - mutex_lock(&gsm->tx_mutex); + spin_lock_irqsave(&gsm->tx_lock, flags); list_for_each_entry_safe(msg, nmsg, &gsm->tx_data_list, list) { if (msg->addr != addr) continue; @@ -737,7 +738,7 @@ static void gsm_dlci_clear_queues(struct gsm_mux *gsm, struct gsm_dlci *dlci) list_del(&msg->list); kfree(msg); } - mutex_unlock(&gsm->tx_mutex); + spin_unlock_irqrestore(&gsm->tx_lock, flags); }
/** @@ -1008,7 +1009,7 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg) gsm->tx_bytes += msg->len;
gsmld_write_trigger(gsm); - schedule_delayed_work(&gsm->kick_timeout, 10 * gsm->t1 * HZ / 100); + mod_timer(&gsm->kick_timer, jiffies + 10 * gsm->t1 * HZ / 100); }
/** @@ -1023,9 +1024,10 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg) { - mutex_lock(&dlci->gsm->tx_mutex); + unsigned long flags; + spin_lock_irqsave(&dlci->gsm->tx_lock, flags); __gsm_data_queue(dlci, msg); - mutex_unlock(&dlci->gsm->tx_mutex); + spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags); }
/** @@ -1037,7 +1039,7 @@ static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg) * is data. Keep to the MRU of the mux. This path handles the usual tty * interface which is a byte stream with optional modem data. * - * Caller must hold the tx_mutex of the mux. + * Caller must hold the tx_lock of the mux. */
static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci) @@ -1097,7 +1099,7 @@ static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci) * is data. Keep to the MRU of the mux. This path handles framed data * queued as skbuffs to the DLCI. * - * Caller must hold the tx_mutex of the mux. + * Caller must hold the tx_lock of the mux. */
static int gsm_dlci_data_output_framed(struct gsm_mux *gsm, @@ -1113,7 +1115,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm, if (dlci->adaption == 4) overhead = 1;
- /* dlci->skb is locked by tx_mutex */ + /* dlci->skb is locked by tx_lock */ if (dlci->skb == NULL) { dlci->skb = skb_dequeue_tail(&dlci->skb_list); if (dlci->skb == NULL) @@ -1167,7 +1169,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm, * Push an empty frame in to the transmit queue to update the modem status * bits and to transmit an optional break. * - * Caller must hold the tx_mutex of the mux. + * Caller must hold the tx_lock of the mux. */
static int gsm_dlci_modem_output(struct gsm_mux *gsm, struct gsm_dlci *dlci, @@ -1281,12 +1283,13 @@ static int gsm_dlci_data_sweep(struct gsm_mux *gsm)
static void gsm_dlci_data_kick(struct gsm_dlci *dlci) { + unsigned long flags; int sweep;
if (dlci->constipated) return;
- mutex_lock(&dlci->gsm->tx_mutex); + spin_lock_irqsave(&dlci->gsm->tx_lock, flags); /* If we have nothing running then we need to fire up */ sweep = (dlci->gsm->tx_bytes < TX_THRESH_LO); if (dlci->gsm->tx_bytes == 0) { @@ -1297,7 +1300,7 @@ static void gsm_dlci_data_kick(struct gsm_dlci *dlci) } if (sweep) gsm_dlci_data_sweep(dlci->gsm); - mutex_unlock(&dlci->gsm->tx_mutex); + spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags); }
/* @@ -1981,23 +1984,24 @@ static void gsm_dlci_command(struct gsm_dlci *dlci, const u8 *data, int len) }
/** - * gsm_kick_timeout - transmit if possible - * @work: work contained in our gsm object + * gsm_kick_timer - transmit if possible + * @t: timer contained in our gsm object * * Transmit data from DLCIs if the queue is empty. We can't rely on * a tty wakeup except when we filled the pipe so we need to fire off * new data ourselves in other cases. */ -static void gsm_kick_timeout(struct work_struct *work) +static void gsm_kick_timer(struct timer_list *t) { - struct gsm_mux *gsm = container_of(work, struct gsm_mux, kick_timeout.work); + struct gsm_mux *gsm = from_timer(gsm, t, kick_timer); + unsigned long flags; int sent = 0;
- mutex_lock(&gsm->tx_mutex); + spin_lock_irqsave(&gsm->tx_lock, flags); /* If we have nothing running then we need to fire up */ if (gsm->tx_bytes < TX_THRESH_LO) sent = gsm_dlci_data_sweep(gsm); - mutex_unlock(&gsm->tx_mutex); + spin_unlock_irqrestore(&gsm->tx_lock, flags);
if (sent && debug & 4) pr_info("%s TX queue stalled\n", __func__); @@ -2454,7 +2458,7 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc) }
/* Finish outstanding timers, making sure they are done */ - cancel_delayed_work_sync(&gsm->kick_timeout); + del_timer_sync(&gsm->kick_timer); del_timer_sync(&gsm->t2_timer);
/* Finish writing to ldisc */ @@ -2527,7 +2531,6 @@ static void gsm_free_mux(struct gsm_mux *gsm) break; } } - mutex_destroy(&gsm->tx_mutex); mutex_destroy(&gsm->mutex); kfree(gsm->txframe); kfree(gsm->buf); @@ -2599,15 +2602,15 @@ static struct gsm_mux *gsm_alloc_mux(void) } spin_lock_init(&gsm->lock); mutex_init(&gsm->mutex); - mutex_init(&gsm->tx_mutex); kref_init(&gsm->ref); INIT_LIST_HEAD(&gsm->tx_ctrl_list); INIT_LIST_HEAD(&gsm->tx_data_list); - INIT_DELAYED_WORK(&gsm->kick_timeout, gsm_kick_timeout); + timer_setup(&gsm->kick_timer, gsm_kick_timer, 0); timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0); INIT_WORK(&gsm->tx_work, gsmld_write_task); init_waitqueue_head(&gsm->event); spin_lock_init(&gsm->control_lock); + spin_lock_init(&gsm->tx_lock);
gsm->t1 = T1; gsm->t2 = T2; @@ -2632,7 +2635,6 @@ static struct gsm_mux *gsm_alloc_mux(void) } spin_unlock(&gsm_mux_lock); if (i == MAX_MUX) { - mutex_destroy(&gsm->tx_mutex); mutex_destroy(&gsm->mutex); kfree(gsm->txframe); kfree(gsm->buf); @@ -2788,16 +2790,17 @@ static void gsmld_write_trigger(struct gsm_mux *gsm) static void gsmld_write_task(struct work_struct *work) { struct gsm_mux *gsm = container_of(work, struct gsm_mux, tx_work); + unsigned long flags; int i, ret;
/* All outstanding control channel and control messages and one data * frame is sent. */ ret = -ENODEV; - mutex_lock(&gsm->tx_mutex); + spin_lock_irqsave(&gsm->tx_lock, flags); if (gsm->tty) ret = gsm_data_kick(gsm); - mutex_unlock(&gsm->tx_mutex); + spin_unlock_irqrestore(&gsm->tx_lock, flags);
if (ret >= 0) for (i = 0; i < NUM_DLCI; i++) @@ -3005,6 +3008,7 @@ static ssize_t gsmld_write(struct tty_struct *tty, struct file *file, const unsigned char *buf, size_t nr) { struct gsm_mux *gsm = tty->disc_data; + unsigned long flags; int space; int ret;
@@ -3012,13 +3016,13 @@ static ssize_t gsmld_write(struct tty_struct *tty, struct file *file, return -ENODEV;
ret = -ENOBUFS; - mutex_lock(&gsm->tx_mutex); + spin_lock_irqsave(&gsm->tx_lock, flags); space = tty_write_room(tty); if (space >= nr) ret = tty->ops->write(tty, buf, nr); else set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); - mutex_unlock(&gsm->tx_mutex); + spin_unlock_irqrestore(&gsm->tx_lock, flags);
return ret; } @@ -3315,13 +3319,14 @@ static struct tty_ldisc_ops tty_ldisc_packet = { static void gsm_modem_upd_via_data(struct gsm_dlci *dlci, u8 brk) { struct gsm_mux *gsm = dlci->gsm; + unsigned long flags;
if (dlci->state != DLCI_OPEN || dlci->adaption != 2) return;
- mutex_lock(&gsm->tx_mutex); + spin_lock_irqsave(&gsm->tx_lock, flags); gsm_dlci_modem_output(gsm, dlci, brk); - mutex_unlock(&gsm->tx_mutex); + spin_unlock_irqrestore(&gsm->tx_lock, flags); }
/** diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index b96fbf8d31df..2ad735dd6c05 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -293,6 +293,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up) { struct omap8250_priv *priv = up->port.private_data; struct uart_8250_dma *dma = up->dma; + u8 mcr = serial8250_in_MCR(up);
if (dma && dma->tx_running) { /* @@ -309,7 +310,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up) serial_out(up, UART_EFR, UART_EFR_ECB);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A); - serial8250_out_MCR(up, UART_MCR_TCRTLR); + serial8250_out_MCR(up, mcr | UART_MCR_TCRTLR); serial_out(up, UART_FCR, up->fcr);
omap8250_update_scr(up, priv); @@ -325,7 +326,8 @@ static void omap8250_restore_regs(struct uart_8250_port *up) serial_out(up, UART_LCR, 0);
/* drop TCR + TLR access, we setup XON/XOFF later */ - serial8250_out_MCR(up, up->mcr); + serial8250_out_MCR(up, mcr); + serial_out(up, UART_IER, up->ier);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); @@ -670,7 +672,6 @@ static int omap_8250_startup(struct uart_port *port)
pm_runtime_get_sync(port->dev);
- up->mcr = 0; serial_out(up, UART_FCR, UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
serial_out(up, UART_LCR, UART_LCR_WLEN8); diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c index c67715f6f756..f9aa50ff14d4 100644 --- a/drivers/usb/cdns3/cdnsp-gadget.c +++ b/drivers/usb/cdns3/cdnsp-gadget.c @@ -600,11 +600,11 @@ int cdnsp_halt_endpoint(struct cdnsp_device *pdev,
trace_cdnsp_ep_halt(value ? "Set" : "Clear");
- if (value) { - ret = cdnsp_cmd_stop_ep(pdev, pep); - if (ret) - return ret; + ret = cdnsp_cmd_stop_ep(pdev, pep); + if (ret) + return ret;
+ if (value) { if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_STOPPED) { cdnsp_queue_halt_endpoint(pdev, pep->idx); cdnsp_ring_cmd_db(pdev); @@ -613,10 +613,6 @@ int cdnsp_halt_endpoint(struct cdnsp_device *pdev,
pep->ep_state |= EP_HALTED; } else { - /* - * In device mode driver can call reset endpoint command - * from any endpoint state. - */ cdnsp_queue_reset_ep(pdev, pep->idx); cdnsp_ring_cmd_db(pdev); ret = cdnsp_wait_for_cmd_compl(pdev); diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c index 794e413800ae..2f29431f612e 100644 --- a/drivers/usb/cdns3/cdnsp-ring.c +++ b/drivers/usb/cdns3/cdnsp-ring.c @@ -1763,10 +1763,15 @@ static u32 cdnsp_td_remainder(struct cdnsp_device *pdev, int trb_buff_len, unsigned int td_total_len, struct cdnsp_request *preq, - bool more_trbs_coming) + bool more_trbs_coming, + bool zlp) { u32 maxp, total_packet_count;
+ /* Before ZLP driver needs set TD_SIZE = 1. */ + if (zlp) + return 1; + /* One TRB with a zero-length data packet. */ if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) || trb_buff_len == td_total_len) @@ -1960,7 +1965,8 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq) /* Set the TRB length, TD size, and interrupter fields. */ remainder = cdnsp_td_remainder(pdev, enqd_len, trb_buff_len, full_len, preq, - more_trbs_coming); + more_trbs_coming, + zero_len_trb);
length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) | TRB_INTR_TARGET(0); @@ -2025,7 +2031,7 @@ int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
if (preq->request.length > 0) { remainder = cdnsp_td_remainder(pdev, 0, preq->request.length, - preq->request.length, preq, 1); + preq->request.length, preq, 1, 0);
length_field = TRB_LEN(preq->request.length) | TRB_TD_SIZE(remainder) | TRB_INTR_TARGET(0); @@ -2076,7 +2082,8 @@ int cdnsp_cmd_stop_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep) u32 ep_state = GET_EP_CTX_STATE(pep->out_ctx); int ret = 0;
- if (ep_state == EP_STATE_STOPPED || ep_state == EP_STATE_DISABLED) { + if (ep_state == EP_STATE_STOPPED || ep_state == EP_STATE_DISABLED || + ep_state == EP_STATE_HALTED) { trace_cdnsp_ep_stopped_or_disabled(pep->out_ctx); goto ep_stopped; } @@ -2225,7 +2232,7 @@ static int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev, /* Set the TRB length, TD size, & interrupter fields. */ remainder = cdnsp_td_remainder(pdev, running_total, trb_buff_len, td_len, preq, - more_trbs_coming); + more_trbs_coming, 0);
length_field = TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0);
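The cdnsp hunks above add a zlp flag to cdnsp_td_remainder() so that the TRB before a zero-length packet carries TD_SIZE = 1. The toy model below takes only the zlp and "last/zero-length TRB" cases from the diff; the "packets remaining" computation at the end is an assumption modelled on the usual xHCI-style definition, since that part of the function is not shown in the hunk.

/* Toy model of the TD-size bookkeeping touched by the cdnsp hunks above. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

static unsigned int td_remainder(unsigned int transferred,
                                 unsigned int trb_buff_len,
                                 unsigned int td_total_len,
                                 unsigned int maxp,
                                 int more_trbs_coming, int zlp)
{
    unsigned int total_packets, sent_packets;

    /* Before a zero-length packet the TD size must be 1 (from the diff). */
    if (zlp)
        return 1;

    /* Last TRB of the TD, or a zero-length TD: nothing remains. */
    if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
        trb_buff_len == td_total_len)
        return 0;

    /* Assumed xHCI-style "packets still to send after this TRB". */
    total_packets = DIV_ROUND_UP(td_total_len, maxp);
    sent_packets = (transferred + trb_buff_len) / maxp;
    return total_packets - sent_packets;
}

int main(void)
{
    /* 3000-byte TD, 512-byte packets, queued as 1024-byte TRBs. */
    printf("%u\n", td_remainder(0, 1024, 3000, 512, 1, 0));   /* 4 left */
    printf("%u\n", td_remainder(2048, 952, 3000, 512, 0, 0)); /* last: 0 */
    printf("%u\n", td_remainder(3000, 0, 3000, 512, 1, 1));   /* ZLP: 1 */
    return 0;
}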
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c index 0ecf20eeceee..4be6a873bd07 100644 --- a/drivers/usb/dwc3/dwc3-exynos.c +++ b/drivers/usb/dwc3/dwc3-exynos.c @@ -37,15 +37,6 @@ struct dwc3_exynos { struct regulator *vdd10; };
-static int dwc3_exynos_remove_child(struct device *dev, void *unused) -{ - struct platform_device *pdev = to_platform_device(dev); - - platform_device_unregister(pdev); - - return 0; -} - static int dwc3_exynos_probe(struct platform_device *pdev) { struct dwc3_exynos *exynos; @@ -142,7 +133,7 @@ static int dwc3_exynos_remove(struct platform_device *pdev) struct dwc3_exynos *exynos = platform_get_drvdata(pdev); int i;
- device_for_each_child(&pdev->dev, NULL, dwc3_exynos_remove_child); + of_platform_depopulate(&pdev->dev);
for (i = exynos->num_clks - 1; i >= 0; i--) clk_disable_unprepare(exynos->clks[i]); diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 0ed9826a4c47..6f61a288073b 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -965,7 +965,7 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action) return 0; }
-static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep) +static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep, int status) { struct dwc3_request *req;
@@ -975,19 +975,19 @@ static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep) while (!list_empty(&dep->started_list)) { req = next_request(&dep->started_list);
- dwc3_gadget_giveback(dep, req, -ESHUTDOWN); + dwc3_gadget_giveback(dep, req, status); }
while (!list_empty(&dep->pending_list)) { req = next_request(&dep->pending_list);
- dwc3_gadget_giveback(dep, req, -ESHUTDOWN); + dwc3_gadget_giveback(dep, req, status); }
while (!list_empty(&dep->cancelled_list)) { req = next_request(&dep->cancelled_list);
- dwc3_gadget_giveback(dep, req, -ESHUTDOWN); + dwc3_gadget_giveback(dep, req, status); } }
@@ -1016,18 +1016,18 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) reg &= ~DWC3_DALEPENA_EP(dep->number); dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
+ dwc3_remove_requests(dwc, dep, -ESHUTDOWN); + + dep->stream_capable = false; + dep->type = 0; + dep->flags &= DWC3_EP_TXFIFO_RESIZED; + /* Clear out the ep descriptors for non-ep0 */ if (dep->number > 1) { dep->endpoint.comp_desc = NULL; dep->endpoint.desc = NULL; }
- dwc3_remove_requests(dwc, dep); - - dep->stream_capable = false; - dep->type = 0; - dep->flags &= DWC3_EP_TXFIFO_RESIZED; - return 0; }
@@ -2350,7 +2350,7 @@ static void dwc3_stop_active_transfers(struct dwc3 *dwc) if (!dep) continue;
- dwc3_remove_requests(dwc, dep); + dwc3_remove_requests(dwc, dep, -ESHUTDOWN); } }
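The dwc3 gadget hunks above change dwc3_remove_requests() to take the completion status from the caller instead of hard-coding -ESHUTDOWN, so different unwind paths can report different errors when they drain an endpoint's request lists. A plain C analogue of that "drain a list, complete each entry with a caller-supplied status" shape is below; the list and names are illustrative only.

/* Minimal analogue of the dwc3_remove_requests() change above. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
    int id;
    struct request *next;
};

static void giveback(struct request *req, int status)
{
    printf("request %d completed with status %d\n", req->id, status);
    free(req);
}

static void remove_requests(struct request **list, int status)
{
    while (*list) {
        struct request *req = *list;

        *list = req->next;
        giveback(req, status);
    }
}

int main(void)
{
    struct request *started = NULL;

    for (int i = 0; i < 3; i++) {
        struct request *req = malloc(sizeof(*req));

        if (!req)
            return 1;
        req->id = i;
        req->next = started;
        started = req;
    }

    /* Different teardown paths could pass different codes here. */
    remove_requests(&started, -ESHUTDOWN);
    return 0;
}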
diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c index f422f9c58ba7..1ea6d2e5b218 100644 --- a/drivers/virt/coco/sev-guest/sev-guest.c +++ b/drivers/virt/coco/sev-guest/sev-guest.c @@ -67,8 +67,27 @@ static bool is_vmpck_empty(struct snp_guest_dev *snp_dev) return true; }
+/* + * If an error is received from the host or AMD Secure Processor (ASP) there + * are two options. Either retry the exact same encrypted request or discontinue + * using the VMPCK. + * + * This is because in the current encryption scheme GHCB v2 uses AES-GCM to + * encrypt the requests. The IV for this scheme is the sequence number. GCM + * cannot tolerate IV reuse. + * + * The ASP FW v1.51 only increments the sequence numbers on a successful + * guest<->ASP back and forth and only accepts messages at its exact sequence + * number. + * + * So if the sequence number were to be reused the encryption scheme is + * vulnerable. If the sequence number were incremented for a fresh IV the ASP + * will reject the request. + */ static void snp_disable_vmpck(struct snp_guest_dev *snp_dev) { + dev_alert(snp_dev->dev, "Disabling vmpck_id %d to prevent IV reuse.\n", + vmpck_id); memzero_explicit(snp_dev->vmpck, VMPCK_KEY_LEN); snp_dev->vmpck = NULL; } @@ -321,34 +340,71 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in if (rc) return rc;
- /* Call firmware to process the request */ + /* + * Call firmware to process the request. In this function the encrypted + * message enters shared memory with the host. So after this call the + * sequence number must be incremented or the VMPCK must be deleted to + * prevent reuse of the IV. + */ rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err); + + /* + * If the extended guest request fails due to having too small of a + * certificate data buffer, retry the same guest request without the + * extended data request in order to increment the sequence number + * and thus avoid IV reuse. + */ + if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST && + err == SNP_GUEST_REQ_INVALID_LEN) { + const unsigned int certs_npages = snp_dev->input.data_npages; + + exit_code = SVM_VMGEXIT_GUEST_REQUEST; + + /* + * If this call to the firmware succeeds, the sequence number can + * be incremented allowing for continued use of the VMPCK. If + * there is an error reflected in the return value, this value + * is checked further down and the result will be the deletion + * of the VMPCK and the error code being propagated back to the + * user as an ioctl() return code. + */ + rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err); + + /* + * Override the error to inform callers the given extended + * request buffer size was too small and give the caller the + * required buffer size. + */ + err = SNP_GUEST_REQ_INVALID_LEN; + snp_dev->input.data_npages = certs_npages; + } + if (fw_err) *fw_err = err;
- if (rc) - return rc; + if (rc) { + dev_alert(snp_dev->dev, + "Detected error from ASP request. rc: %d, fw_err: %llu\n", + rc, *fw_err); + goto disable_vmpck; + }
- /* - * The verify_and_dec_payload() will fail only if the hypervisor is - * actively modifying the message header or corrupting the encrypted payload. - * This hints that hypervisor is acting in a bad faith. Disable the VMPCK so that - * the key cannot be used for any communication. The key is disabled to ensure - * that AES-GCM does not use the same IV while encrypting the request payload. - */ rc = verify_and_dec_payload(snp_dev, resp_buf, resp_sz); if (rc) { dev_alert(snp_dev->dev, - "Detected unexpected decode failure, disabling the vmpck_id %d\n", - vmpck_id); - snp_disable_vmpck(snp_dev); - return rc; + "Detected unexpected decode failure from ASP. rc: %d\n", + rc); + goto disable_vmpck; }
/* Increment to new message sequence after payload decryption was successful. */ snp_inc_msg_seqno(snp_dev);
return 0; + +disable_vmpck: + snp_disable_vmpck(snp_dev); + return rc; }
static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg) diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c index 18f0ed8b1f93..6ebd819338ec 100644 --- a/drivers/xen/platform-pci.c +++ b/drivers/xen/platform-pci.c @@ -144,7 +144,7 @@ static int platform_pci_probe(struct pci_dev *pdev, if (ret) { dev_warn(&pdev->dev, "Unable to set the evtchn callback " "err=%d\n", ret); - goto out; + goto irq_out; } }
@@ -152,13 +152,16 @@ static int platform_pci_probe(struct pci_dev *pdev, grant_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes); ret = gnttab_setup_auto_xlat_frames(grant_frames); if (ret) - goto out; + goto irq_out; ret = gnttab_init(); if (ret) goto grant_out; return 0; grant_out: gnttab_free_auto_xlat_frames(); +irq_out: + if (!xen_have_vector_callback) + free_irq(pdev->irq, pdev); out: pci_release_region(pdev, 0); mem_out: diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c index 5e53b4817f16..097316a74126 100644 --- a/drivers/xen/xen-pciback/conf_space_capability.c +++ b/drivers/xen/xen-pciback/conf_space_capability.c @@ -190,13 +190,16 @@ static const struct config_field caplist_pm[] = { };
static struct msi_msix_field_config { - u16 enable_bit; /* bit for enabling MSI/MSI-X */ - unsigned int int_type; /* interrupt type for exclusiveness check */ + u16 enable_bit; /* bit for enabling MSI/MSI-X */ + u16 allowed_bits; /* bits allowed to be changed */ + unsigned int int_type; /* interrupt type for exclusiveness check */ } msi_field_config = { .enable_bit = PCI_MSI_FLAGS_ENABLE, + .allowed_bits = PCI_MSI_FLAGS_ENABLE, .int_type = INTERRUPT_TYPE_MSI, }, msix_field_config = { .enable_bit = PCI_MSIX_FLAGS_ENABLE, + .allowed_bits = PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL, .int_type = INTERRUPT_TYPE_MSIX, };
@@ -229,7 +232,7 @@ static int msi_msix_flags_write(struct pci_dev *dev, int offset, u16 new_value, return 0;
if (!dev_data->allow_interrupt_control || - (new_value ^ old_value) & ~field_config->enable_bit) + (new_value ^ old_value) & ~field_config->allowed_bits) return PCIBIOS_SET_FAILED;
if (new_value & field_config->enable_bit) { diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index fe0cc816b4eb..4fff0067bd2a 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -3105,6 +3105,8 @@ static int btrfs_ioctl_get_subvol_info(struct inode *inode, void __user *argp) } }
+ btrfs_free_path(path); + path = NULL; if (copy_to_user(argp, subvol_info, sizeof(*subvol_info))) ret = -EFAULT;
@@ -3194,6 +3196,8 @@ static int btrfs_ioctl_get_subvol_rootref(struct btrfs_root *root, }
out: + btrfs_free_path(path); + if (!ret || ret == -EOVERFLOW) { rootrefs->num_items = found; /* update min_treeid for next search */ @@ -3205,7 +3209,6 @@ static int btrfs_ioctl_get_subvol_rootref(struct btrfs_root *root, }
kfree(rootrefs); - btrfs_free_path(path);
return ret; } @@ -4231,6 +4234,8 @@ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg) ipath->fspath->val[i] = rel_ptr; }
+ btrfs_free_path(path); + path = NULL; ret = copy_to_user((void __user *)(unsigned long)ipa->fspath, ipath->fspath, size); if (ret) { @@ -4281,21 +4286,20 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info, size = min_t(u32, loi->size, SZ_16M); }
- path = btrfs_alloc_path(); - if (!path) { - ret = -ENOMEM; - goto out; - } - inodes = init_data_container(size); if (IS_ERR(inodes)) { ret = PTR_ERR(inodes); - inodes = NULL; - goto out; + goto out_loi; }
+ path = btrfs_alloc_path(); + if (!path) { + ret = -ENOMEM; + goto out; + } ret = iterate_inodes_from_logical(loi->logical, fs_info, path, inodes, ignore_offset); + btrfs_free_path(path); if (ret == -EINVAL) ret = -ENOENT; if (ret < 0) @@ -4307,7 +4311,6 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info, ret = -EFAULT;
out: - btrfs_free_path(path); kvfree(inodes); out_loi: kfree(loi); diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index d5d0717fd09a..00b97e6eb507 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -2251,8 +2251,11 @@ int __init btrfs_init_sysfs(void)
#ifdef CONFIG_BTRFS_DEBUG ret = sysfs_create_group(&btrfs_kset->kobj, &btrfs_debug_feature_attr_group); - if (ret) - goto out2; + if (ret) { + sysfs_unmerge_group(&btrfs_kset->kobj, + &btrfs_static_feature_attr_group); + goto out_remove_group; + } #endif
return 0; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 9205c4a5ca81..11f70ba98140 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -3834,15 +3834,29 @@ static int process_dir_items_leaf(struct btrfs_trans_handle *trans, u64 *last_old_dentry_offset) { struct btrfs_root *log = inode->root->log_root; - struct extent_buffer *src = path->nodes[0]; - const int nritems = btrfs_header_nritems(src); + struct extent_buffer *src; + const int nritems = btrfs_header_nritems(path->nodes[0]); const u64 ino = btrfs_ino(inode); bool last_found = false; int batch_start = 0; int batch_size = 0; int i;
- for (i = path->slots[0]; i < nritems; i++) { + /* + * We need to clone the leaf, release the read lock on it, and use the + * clone before modifying the log tree. See the comment at copy_items() + * about why we need to do this. + */ + src = btrfs_clone_extent_buffer(path->nodes[0]); + if (!src) + return -ENOMEM; + + i = path->slots[0]; + btrfs_release_path(path); + path->nodes[0] = src; + path->slots[0] = i; + + for (; i < nritems; i++) { struct btrfs_dir_item *di; struct btrfs_key key; int ret; @@ -4414,7 +4428,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, { struct btrfs_root *log = inode->root->log_root; struct btrfs_file_extent_item *extent; - struct extent_buffer *src = src_path->nodes[0]; + struct extent_buffer *src; int ret = 0; struct btrfs_key *ins_keys; u32 *ins_sizes; @@ -4425,6 +4439,43 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, const bool skip_csum = (inode->flags & BTRFS_INODE_NODATASUM); const u64 i_size = i_size_read(&inode->vfs_inode);
+ /* + * To keep lockdep happy and avoid deadlocks, clone the source leaf and + * use the clone. This is because otherwise we would be changing the log + * tree, to insert items from the subvolume tree or insert csum items, + * while holding a read lock on a leaf from the subvolume tree, which + * creates a nasty lock dependency when COWing log tree nodes/leaves: + * + * 1) Modifying the log tree triggers an extent buffer allocation while + * holding a write lock on a parent extent buffer from the log tree. + * Allocating the pages for an extent buffer, or the extent buffer + * struct, can trigger inode eviction and finally the inode eviction + * will trigger a release/remove of a delayed node, which requires + * taking the delayed node's mutex; + * + * 2) Allocating a metadata extent for a log tree can trigger the async + * reclaim thread and make us wait for it to release enough space and + * unblock our reservation ticket. The reclaim thread can start + * flushing delayed items, and that in turn results in the need to + * lock delayed node mutexes and in the need to write lock extent + * buffers of a subvolume tree - all this while holding a write lock + * on the parent extent buffer in the log tree. + * + * So one task in scenario 1) running in parallel with another task in + * scenario 2) could lead to a deadlock, one wanting to lock a delayed + * node mutex while having a read lock on a leaf from the subvolume, + * while the other is holding the delayed node's mutex and wants to + * write lock the same subvolume leaf for flushing delayed items. + */ + src = btrfs_clone_extent_buffer(src_path->nodes[0]); + if (!src) + return -ENOMEM; + + i = src_path->slots[0]; + btrfs_release_path(src_path); + src_path->nodes[0] = src; + src_path->slots[0] = i; + ins_data = kmalloc(nr * sizeof(struct btrfs_key) + nr * sizeof(u32), GFP_NOFS); if (!ins_data) diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c index a227d27a63bf..9ea40ddcc425 100644 --- a/fs/btrfs/zoned.c +++ b/fs/btrfs/zoned.c @@ -134,7 +134,8 @@ static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones, super[i] = page_address(page[i]); }
- if (super[0]->generation > super[1]->generation) + if (btrfs_super_generation(super[0]) > + btrfs_super_generation(super[1])) sector = zones[1].start; else sector = zones[0].start; @@ -466,7 +467,7 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache) goto out; }
- zones = kcalloc(BTRFS_REPORT_NR_ZONES, sizeof(struct blk_zone), GFP_KERNEL); + zones = kvcalloc(BTRFS_REPORT_NR_ZONES, sizeof(struct blk_zone), GFP_KERNEL); if (!zones) { ret = -ENOMEM; goto out; @@ -585,7 +586,7 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache) }
- kfree(zones); + kvfree(zones);
switch (bdev_zoned_model(bdev)) { case BLK_ZONED_HM: @@ -617,7 +618,7 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache) return 0;
out: - kfree(zones); + kvfree(zones); out_free_zone_info: btrfs_destroy_dev_zone_info(device);
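Two small points in the btrfs/zoned.c hunks above: the zone-report buffer moves from kcalloc() to kvcalloc() so a large allocation can fall back to vmalloc, and the superblock generations are now compared through the btrfs_super_generation() accessor because the on-disk field is little-endian. The snippet below only illustrates the endianness point, using the glibc le64toh()/htole64() helpers as stand-ins for le64_to_cpu(); the structure is made up for the example, not the real superblock.

/* Why on-disk little-endian fields need an accessor before comparison:
 * comparing the raw bytes as host integers is only right on LE machines. */
#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct fake_super {
    uint64_t generation;        /* stored little-endian on disk */
};

static uint64_t super_generation(const struct fake_super *sb)
{
    return le64toh(sb->generation);
}

int main(void)
{
    struct fake_super a = { .generation = htole64(1000) };
    struct fake_super b = { .generation = htole64(999) };

    /* Raw comparison happens to work here only on little-endian hosts. */
    printf("raw compare says a is newer:      %s\n",
           a.generation > b.generation ? "yes" : "no");

    /* Accessor-based comparison is correct on any host byte order. */
    printf("accessor says a is newer:         %s\n",
           super_generation(&a) > super_generation(&b) ? "yes" : "no");
    return 0;
}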
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 53cfe026b3ea..02b5c0ac5654 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -2247,7 +2247,6 @@ static int flush_mdlog_and_wait_inode_unsafe_requests(struct inode *inode) struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_mds_request *req1 = NULL, *req2 = NULL; - unsigned int max_sessions; int ret, err = 0;
spin_lock(&ci->i_unsafe_lock); @@ -2265,28 +2264,24 @@ static int flush_mdlog_and_wait_inode_unsafe_requests(struct inode *inode) } spin_unlock(&ci->i_unsafe_lock);
- /* - * The mdsc->max_sessions is unlikely to be changed - * mostly, here we will retry it by reallocating the - * sessions array memory to get rid of the mdsc->mutex - * lock. - */ -retry: - max_sessions = mdsc->max_sessions; - /* * Trigger to flush the journal logs in all the relevant MDSes * manually, or in the worst case we must wait at most 5 seconds * to wait the journal logs to be flushed by the MDSes periodically. */ - if ((req1 || req2) && likely(max_sessions)) { - struct ceph_mds_session **sessions = NULL; - struct ceph_mds_session *s; + if (req1 || req2) { struct ceph_mds_request *req; + struct ceph_mds_session **sessions; + struct ceph_mds_session *s; + unsigned int max_sessions; int i;
- sessions = kzalloc(max_sessions * sizeof(s), GFP_KERNEL); + mutex_lock(&mdsc->mutex); + max_sessions = mdsc->max_sessions; + + sessions = kcalloc(max_sessions, sizeof(s), GFP_KERNEL); if (!sessions) { + mutex_unlock(&mdsc->mutex); err = -ENOMEM; goto out; } @@ -2298,16 +2293,6 @@ static int flush_mdlog_and_wait_inode_unsafe_requests(struct inode *inode) s = req->r_session; if (!s) continue; - if (unlikely(s->s_mds >= max_sessions)) { - spin_unlock(&ci->i_unsafe_lock); - for (i = 0; i < max_sessions; i++) { - s = sessions[i]; - if (s) - ceph_put_mds_session(s); - } - kfree(sessions); - goto retry; - } if (!sessions[s->s_mds]) { s = ceph_get_mds_session(s); sessions[s->s_mds] = s; @@ -2320,16 +2305,6 @@ static int flush_mdlog_and_wait_inode_unsafe_requests(struct inode *inode) s = req->r_session; if (!s) continue; - if (unlikely(s->s_mds >= max_sessions)) { - spin_unlock(&ci->i_unsafe_lock); - for (i = 0; i < max_sessions; i++) { - s = sessions[i]; - if (s) - ceph_put_mds_session(s); - } - kfree(sessions); - goto retry; - } if (!sessions[s->s_mds]) { s = ceph_get_mds_session(s); sessions[s->s_mds] = s; @@ -2341,11 +2316,12 @@ static int flush_mdlog_and_wait_inode_unsafe_requests(struct inode *inode) /* the auth MDS */ spin_lock(&ci->i_ceph_lock); if (ci->i_auth_cap) { - s = ci->i_auth_cap->session; - if (!sessions[s->s_mds]) - sessions[s->s_mds] = ceph_get_mds_session(s); + s = ci->i_auth_cap->session; + if (!sessions[s->s_mds]) + sessions[s->s_mds] = ceph_get_mds_session(s); } spin_unlock(&ci->i_ceph_lock); + mutex_unlock(&mdsc->mutex);
/* send flush mdlog request to MDSes */ for (i = 0; i < max_sessions; i++) { diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 6bc8be9ed2a5..ccad85feb24e 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -1252,7 +1252,7 @@ ssize_t cifs_file_copychunk_range(unsigned int xid, rc = filemap_write_and_wait_range(src_inode->i_mapping, off, off + len - 1); if (rc) - goto out; + goto unlock;
/* should we flush first and last page first */ truncate_inode_pages(&target_inode->i_data, 0); @@ -1268,6 +1268,8 @@ ssize_t cifs_file_copychunk_range(unsigned int xid, * that target is updated on the server */ CIFS_I(target_inode)->time = 0; + +unlock: /* although unlocking in the reverse order from locking is not * strictly necessary here it is a little cleaner to be consistent */ diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 11cd06aa74f0..6a85136da27c 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -302,14 +302,14 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
/* now drop the ref to the current iface */ if (old_iface && iface) { - kref_put(&old_iface->refcount, release_iface); cifs_dbg(FYI, "replacing iface: %pIS with %pIS\n", &old_iface->sockaddr, &iface->sockaddr); - } else if (old_iface) { kref_put(&old_iface->refcount, release_iface); + } else if (old_iface) { cifs_dbg(FYI, "releasing ref to iface: %pIS\n", &old_iface->sockaddr); + kref_put(&old_iface->refcount, release_iface); } else { WARN_ON(!iface); cifs_dbg(FYI, "adding new iface: %pIS\n", &iface->sockaddr); diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 5235974126bd..8378b86c1b49 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -5183,6 +5183,7 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle, * and it is decreased till we reach start. */ again: + ret = 0; if (SHIFT == SHIFT_LEFT) iterator = &start; else @@ -5226,14 +5227,21 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle, ext4_ext_get_actual_len(extent); } else { extent = EXT_FIRST_EXTENT(path[depth].p_hdr); - if (le32_to_cpu(extent->ee_block) > 0) + if (le32_to_cpu(extent->ee_block) > start) *iterator = le32_to_cpu(extent->ee_block) - 1; - else - /* Beginning is reached, end of the loop */ + else if (le32_to_cpu(extent->ee_block) == start) iterator = NULL; - /* Update path extent in case we need to stop */ - while (le32_to_cpu(extent->ee_block) < start) + else { + extent = EXT_LAST_EXTENT(path[depth].p_hdr); + while (le32_to_cpu(extent->ee_block) >= start) + extent--; + + if (extent == EXT_LAST_EXTENT(path[depth].p_hdr)) + break; + extent++; + iterator = NULL; + } path[depth].p_ext = extent; } ret = ext4_ext_shift_path_extents(path, shift, inode, diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 443f83382b9b..9958d4020771 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -1712,18 +1712,26 @@ static int writeback_single_inode(struct inode *inode, wb = inode_to_wb_and_lock_list(inode); spin_lock(&inode->i_lock); /* - * If the inode is now fully clean, then it can be safely removed from - * its writeback list (if any). Otherwise the flusher threads are - * responsible for the writeback lists. + * If the inode is freeing, its i_io_list shoudn't be updated + * as it can be finally deleted at this moment. */ - if (!(inode->i_state & I_DIRTY_ALL)) - inode_cgwb_move_to_attached(inode, wb); - else if (!(inode->i_state & I_SYNC_QUEUED)) { - if ((inode->i_state & I_DIRTY)) - redirty_tail_locked(inode, wb); - else if (inode->i_state & I_DIRTY_TIME) { - inode->dirtied_when = jiffies; - inode_io_list_move_locked(inode, wb, &wb->b_dirty_time); + if (!(inode->i_state & I_FREEING)) { + /* + * If the inode is now fully clean, then it can be safely + * removed from its writeback list (if any). Otherwise the + * flusher threads are responsible for the writeback lists. + */ + if (!(inode->i_state & I_DIRTY_ALL)) + inode_cgwb_move_to_attached(inode, wb); + else if (!(inode->i_state & I_SYNC_QUEUED)) { + if ((inode->i_state & I_DIRTY)) + redirty_tail_locked(inode, wb); + else if (inode->i_state & I_DIRTY_TIME) { + inode->dirtied_when = jiffies; + inode_io_list_move_locked(inode, + wb, + &wb->b_dirty_time); + } } }
diff --git a/fs/fscache/volume.c b/fs/fscache/volume.c index a058e0136bfe..ab8ceddf9efa 100644 --- a/fs/fscache/volume.c +++ b/fs/fscache/volume.c @@ -203,7 +203,11 @@ static struct fscache_volume *fscache_alloc_volume(const char *volume_key, struct fscache_volume *volume; struct fscache_cache *cache; size_t klen, hlen; - char *key; + u8 *key; + + klen = strlen(volume_key); + if (klen > NAME_MAX) + return NULL;
if (!coherency_data) coherency_len = 0; @@ -229,7 +233,6 @@ static struct fscache_volume *fscache_alloc_volume(const char *volume_key, /* Stick the length on the front of the key and pad it out to make * hashing easier. */ - klen = strlen(volume_key); hlen = round_up(1 + klen + 1, sizeof(__le32)); key = kzalloc(hlen, GFP_KERNEL); if (!key) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 71bfb663aac5..89f4741728ba 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -2963,11 +2963,9 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, .mode = mode }; int err; - bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) || - (mode & (FALLOC_FL_PUNCH_HOLE | - FALLOC_FL_ZERO_RANGE)); - - bool block_faults = FUSE_IS_DAX(inode) && lock_inode; + bool block_faults = FUSE_IS_DAX(inode) && + (!(mode & FALLOC_FL_KEEP_SIZE) || + (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)));
if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) @@ -2976,22 +2974,20 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, if (fm->fc->no_fallocate) return -EOPNOTSUPP;
- if (lock_inode) { - inode_lock(inode); - if (block_faults) { - filemap_invalidate_lock(inode->i_mapping); - err = fuse_dax_break_layouts(inode, 0, 0); - if (err) - goto out; - } + inode_lock(inode); + if (block_faults) { + filemap_invalidate_lock(inode->i_mapping); + err = fuse_dax_break_layouts(inode, 0, 0); + if (err) + goto out; + }
- if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) { - loff_t endbyte = offset + length - 1; + if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) { + loff_t endbyte = offset + length - 1;
- err = fuse_writeback_range(inode, offset, endbyte); - if (err) - goto out; - } + err = fuse_writeback_range(inode, offset, endbyte); + if (err) + goto out; }
if (!(mode & FALLOC_FL_KEEP_SIZE) && @@ -3039,8 +3035,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, if (block_faults) filemap_invalidate_unlock(inode->i_mapping);
- if (lock_inode) - inode_unlock(inode); + inode_unlock(inode);
fuse_flush_time_update(inode);
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index fc17b0ac8729..f3cd614e1f1e 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -847,10 +847,11 @@ nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf, struct svc_rqst *rqstp = sd->u.data; struct page *page = buf->page; // may be a compound one unsigned offset = buf->offset; + struct page *last_page;
- page += offset / PAGE_SIZE; - for (int i = sd->len; i > 0; i -= PAGE_SIZE) - svc_rqst_replace_page(rqstp, page++); + last_page = page + (offset + sd->len - 1) / PAGE_SIZE; + for (page += offset / PAGE_SIZE; page <= last_page; page++) + svc_rqst_replace_page(rqstp, page); if (rqstp->rq_res.page_len == 0) // first call rqstp->rq_res.page_base = offset % PAGE_SIZE; rqstp->rq_res.page_len += sd->len; diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c index 77ff8e95421f..dc359b56fdfa 100644 --- a/fs/nilfs2/sufile.c +++ b/fs/nilfs2/sufile.c @@ -495,14 +495,22 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum, int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum) { struct buffer_head *bh; + void *kaddr; + struct nilfs_segment_usage *su; int ret;
+ down_write(&NILFS_MDT(sufile)->mi_sem); ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh); if (!ret) { mark_buffer_dirty(bh); nilfs_mdt_mark_dirty(sufile); + kaddr = kmap_atomic(bh->b_page); + su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr); + nilfs_segment_usage_set_dirty(su); + kunmap_atomic(kaddr); brelse(bh); } + up_write(&NILFS_MDT(sufile)->mi_sem); return ret; }
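The nfsd_splice_actor hunk a little above replaces the byte-countdown loop with an explicit first/last page computation: a splice of sd->len bytes starting at offset touches pages offset / PAGE_SIZE through (offset + sd->len - 1) / PAGE_SIZE inclusive. A small standalone check of that arithmetic (a 4096-byte PAGE_SIZE is assumed here for illustration):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Return how many pages a run of `len` bytes at `offset` touches (len > 0). */
unsigned long pages_touched(unsigned long offset, unsigned long len)
{
	unsigned long first = offset / PAGE_SIZE;
	unsigned long last  = (offset + len - 1) / PAGE_SIZE;

	return last - first + 1;
}

int main(void)
{
	/* 100 bytes at offset 4090 straddle two pages. */
	printf("%lu\n", pages_touched(4090, 100));    /* 2 */
	/* A full page at offset 0 touches exactly one page. */
	printf("%lu\n", pages_touched(0, PAGE_SIZE)); /* 1 */
	return 0;
}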
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c index 860f0b1032c6..2c53fbb8d918 100644 --- a/fs/zonefs/super.c +++ b/fs/zonefs/super.c @@ -40,6 +40,13 @@ static void zonefs_account_active(struct inode *inode) if (zi->i_ztype != ZONEFS_ZTYPE_SEQ) return;
+ /* + * For zones that transitioned to the offline or readonly condition, + * we only need to clear the active state. + */ + if (zi->i_flags & (ZONEFS_ZONE_OFFLINE | ZONEFS_ZONE_READONLY)) + goto out; + /* * If the zone is active, that is, if it is explicitly open or * partially written, check if it was already accounted as active. @@ -53,6 +60,7 @@ static void zonefs_account_active(struct inode *inode) return; }
+out: /* The zone is not active. If it was, update the active count */ if (zi->i_flags & ZONEFS_ZONE_ACTIVE) { zi->i_flags &= ~ZONEFS_ZONE_ACTIVE; @@ -324,6 +332,7 @@ static loff_t zonefs_check_zone_condition(struct inode *inode, inode->i_flags |= S_IMMUTABLE; inode->i_mode &= ~0777; zone->wp = zone->start; + zi->i_flags |= ZONEFS_ZONE_OFFLINE; return 0; case BLK_ZONE_COND_READONLY: /* @@ -342,8 +351,10 @@ static loff_t zonefs_check_zone_condition(struct inode *inode, zone->cond = BLK_ZONE_COND_OFFLINE; inode->i_mode &= ~0777; zone->wp = zone->start; + zi->i_flags |= ZONEFS_ZONE_OFFLINE; return 0; } + zi->i_flags |= ZONEFS_ZONE_READONLY; inode->i_mode &= ~0222; return i_size_read(inode); case BLK_ZONE_COND_FULL: @@ -478,14 +489,22 @@ static void __zonefs_io_error(struct inode *inode, bool write) struct super_block *sb = inode->i_sb; struct zonefs_sb_info *sbi = ZONEFS_SB(sb); unsigned int noio_flag; - unsigned int nr_zones = - zi->i_zone_size >> (sbi->s_zone_sectors_shift + SECTOR_SHIFT); + unsigned int nr_zones = 1; struct zonefs_ioerr_data err = { .inode = inode, .write = write, }; int ret;
+ /* + * The only files that have more than one zone are conventional zone + * files with aggregated conventional zones, for which the inode zone + * size is always larger than the device zone size. + */ + if (zi->i_zone_size > bdev_zone_sectors(sb->s_bdev)) + nr_zones = zi->i_zone_size >> + (sbi->s_zone_sectors_shift + SECTOR_SHIFT); + /* * Memory allocations in blkdev_report_zones() can trigger a memory * reclaim which may in turn cause a recursion into zonefs as well as @@ -1407,6 +1426,14 @@ static int zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone, zi->i_ztype = type; zi->i_zsector = zone->start; zi->i_zone_size = zone->len << SECTOR_SHIFT; + if (zi->i_zone_size > bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT && + !(sbi->s_features & ZONEFS_F_AGGRCNV)) { + zonefs_err(sb, + "zone size %llu doesn't match device's zone sectors %llu\n", + zi->i_zone_size, + bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT); + return -EINVAL; + }
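The zonefs hunks above compare the per-inode zone size (bytes) against the device zone size (512-byte sectors shifted by SECTOR_SHIFT), and derive nr_zones only for aggregated conventional zone files. The unit conversions involved, in a standalone sketch with made-up example values:

#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors */

int main(void)
{
	uint64_t zone_sectors = 1ULL << 19;		/* 256 MiB device zones */
	uint64_t zone_bytes = zone_sectors << SECTOR_SHIFT;
	uint64_t inode_zone_bytes = 4 * zone_bytes;	/* aggregated conv zones */
	unsigned int zone_sectors_shift = 19;		/* ilog2(zone_sectors) */
	unsigned int nr_zones = 1;

	/* Only aggregated conventional files span more than one device zone. */
	if (inode_zone_bytes > zone_bytes)
		nr_zones = inode_zone_bytes >> (zone_sectors_shift + SECTOR_SHIFT);

	printf("zone: %llu bytes, file spans %u zone(s)\n",
	       (unsigned long long)zone_bytes, nr_zones);	/* 4 zones */
	return 0;
}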
zi->i_max_size = min_t(loff_t, MAX_LFS_FILESIZE, zone->capacity << SECTOR_SHIFT); @@ -1456,11 +1483,11 @@ static struct dentry *zonefs_create_inode(struct dentry *parent, struct inode *dir = d_inode(parent); struct dentry *dentry; struct inode *inode; - int ret; + int ret = -ENOMEM;
dentry = d_alloc_name(parent, name); if (!dentry) - return NULL; + return ERR_PTR(ret);
inode = new_inode(parent->d_sb); if (!inode) @@ -1485,7 +1512,7 @@ static struct dentry *zonefs_create_inode(struct dentry *parent, dput: dput(dentry);
- return NULL; + return ERR_PTR(ret); }
struct zonefs_zone_data { @@ -1505,7 +1532,7 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd, struct blk_zone *zone, *next, *end; const char *zgroup_name; char *file_name; - struct dentry *dir; + struct dentry *dir, *dent; unsigned int n = 0; int ret;
@@ -1523,8 +1550,8 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd, zgroup_name = "seq";
dir = zonefs_create_inode(sb->s_root, zgroup_name, NULL, type); - if (!dir) { - ret = -ENOMEM; + if (IS_ERR(dir)) { + ret = PTR_ERR(dir); goto free; }
@@ -1570,8 +1597,9 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd, * Use the file number within its group as file name. */ snprintf(file_name, ZONEFS_NAME_MAX - 1, "%u", n); - if (!zonefs_create_inode(dir, file_name, zone, type)) { - ret = -ENOMEM; + dent = zonefs_create_inode(dir, file_name, zone, type); + if (IS_ERR(dent)) { + ret = PTR_ERR(dent); goto free; }
@@ -1905,18 +1933,18 @@ static int __init zonefs_init(void) if (ret) return ret;
- ret = register_filesystem(&zonefs_type); + ret = zonefs_sysfs_init(); if (ret) goto destroy_inodecache;
- ret = zonefs_sysfs_init(); + ret = register_filesystem(&zonefs_type); if (ret) - goto unregister_fs; + goto sysfs_exit;
return 0;
-unregister_fs: - unregister_filesystem(&zonefs_type); +sysfs_exit: + zonefs_sysfs_exit(); destroy_inodecache: zonefs_destroy_inodecache();
@@ -1925,9 +1953,9 @@ static int __init zonefs_init(void)
static void __exit zonefs_exit(void) { + unregister_filesystem(&zonefs_type); zonefs_sysfs_exit(); zonefs_destroy_inodecache(); - unregister_filesystem(&zonefs_type); }
MODULE_AUTHOR("Damien Le Moal"); diff --git a/fs/zonefs/zonefs.h b/fs/zonefs/zonefs.h index 4b3de66c3233..1dbe78119ff1 100644 --- a/fs/zonefs/zonefs.h +++ b/fs/zonefs/zonefs.h @@ -39,8 +39,10 @@ static inline enum zonefs_ztype zonefs_zone_type(struct blk_zone *zone) return ZONEFS_ZTYPE_SEQ; }
-#define ZONEFS_ZONE_OPEN (1 << 0) -#define ZONEFS_ZONE_ACTIVE (1 << 1) +#define ZONEFS_ZONE_OPEN (1U << 0) +#define ZONEFS_ZONE_ACTIVE (1U << 1) +#define ZONEFS_ZONE_OFFLINE (1U << 2) +#define ZONEFS_ZONE_READONLY (1U << 3)
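These flag definitions (and the AUDIT_BIT change further down) switch from 1 << n to 1U << n. With a plain int, shifting a set bit into position 31 is undefined behaviour, and any value that does come out typically sign-extends when widened; an unsigned constant avoids both. A quick standalone illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t signed_bit   = 1 << 30;   /* fine, but 1 << 31 would be UB */
	uint64_t unsigned_bit = 1U << 31;  /* well defined: 0x80000000 */

	/* A signed (1 << 31) that survives the UB typically sign-extends to
	 * 0xffffffff80000000 when widened to 64 bits; 1U << 31 does not. */
	printf("%#llx %#llx\n",
	       (unsigned long long)signed_bit,
	       (unsigned long long)unsigned_bit);
	return 0;
}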
/* * In-memory inode data. diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 79624711fda7..e6bf06dc0770 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -946,7 +946,6 @@ extern void blk_queue_io_min(struct request_queue *q, unsigned int min); extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt); extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth); -extern void blk_set_default_limits(struct queue_limits *lim); extern void blk_set_stacking_limits(struct queue_limits *lim); extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, sector_t offset); diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 73662fbabd78..e811e2f99a61 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -27,6 +27,7 @@ #include <linux/bpfptr.h> #include <linux/btf.h> #include <linux/rcupdate_trace.h> +#include <linux/static_call.h>
struct bpf_verifier_env; struct bpf_verifier_log; @@ -894,6 +895,10 @@ struct bpf_dispatcher { void *rw_image; u32 image_off; struct bpf_ksym ksym; +#ifdef CONFIG_HAVE_STATIC_CALL + struct static_call_key *sc_key; + void *sc_tramp; +#endif };
static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func( @@ -911,6 +916,34 @@ struct bpf_trampoline *bpf_trampoline_get(u64 key, struct bpf_attach_target_info *tgt_info); void bpf_trampoline_put(struct bpf_trampoline *tr); int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs); + +/* + * When the architecture supports STATIC_CALL replace the bpf_dispatcher_fn + * indirection with a direct call to the bpf program. If the architecture does + * not have STATIC_CALL, avoid a double-indirection. + */ +#ifdef CONFIG_HAVE_STATIC_CALL + +#define __BPF_DISPATCHER_SC_INIT(_name) \ + .sc_key = &STATIC_CALL_KEY(_name), \ + .sc_tramp = STATIC_CALL_TRAMP_ADDR(_name), + +#define __BPF_DISPATCHER_SC(name) \ + DEFINE_STATIC_CALL(bpf_dispatcher_##name##_call, bpf_dispatcher_nop_func) + +#define __BPF_DISPATCHER_CALL(name) \ + static_call(bpf_dispatcher_##name##_call)(ctx, insnsi, bpf_func) + +#define __BPF_DISPATCHER_UPDATE(_d, _new) \ + __static_call_update((_d)->sc_key, (_d)->sc_tramp, (_new)) + +#else +#define __BPF_DISPATCHER_SC_INIT(name) +#define __BPF_DISPATCHER_SC(name) +#define __BPF_DISPATCHER_CALL(name) bpf_func(ctx, insnsi) +#define __BPF_DISPATCHER_UPDATE(_d, _new) +#endif + #define BPF_DISPATCHER_INIT(_name) { \ .mutex = __MUTEX_INITIALIZER(_name.mutex), \ .func = &_name##_func, \ @@ -922,25 +955,29 @@ int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_func .name = #_name, \ .lnode = LIST_HEAD_INIT(_name.ksym.lnode), \ }, \ + __BPF_DISPATCHER_SC_INIT(_name##_call) \ }
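The new __BPF_DISPATCHER_* macros above are built on the kernel's static_call facility: a patchable direct-call trampoline that replaces an indirect call through a function pointer. A hedged kernel-style sketch of that underlying API, independent of the BPF dispatcher itself (the names my_dispatch, default_handler and fast_handler are made up for illustration):

#include <linux/static_call.h>

static int default_handler(int x)
{
	return x;
}

static int fast_handler(int x)
{
	return x * 2;
}

/* One trampoline; it starts out calling default_handler. */
DEFINE_STATIC_CALL(my_dispatch, default_handler);

static int run(int x)
{
	/* Direct call through the patched trampoline, no indirect branch. */
	return static_call(my_dispatch)(x);
}

static void switch_handler(void)
{
	/* Re-patches the trampoline, much like __BPF_DISPATCHER_UPDATE(). */
	static_call_update(my_dispatch, fast_handler);
}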
#define DEFINE_BPF_DISPATCHER(name) \ + __BPF_DISPATCHER_SC(name); \ noinline __nocfi unsigned int bpf_dispatcher_##name##_func( \ const void *ctx, \ const struct bpf_insn *insnsi, \ bpf_func_t bpf_func) \ { \ - return bpf_func(ctx, insnsi); \ + return __BPF_DISPATCHER_CALL(name); \ } \ EXPORT_SYMBOL(bpf_dispatcher_##name##_func); \ struct bpf_dispatcher bpf_dispatcher_##name = \ BPF_DISPATCHER_INIT(bpf_dispatcher_##name); + #define DECLARE_BPF_DISPATCHER(name) \ unsigned int bpf_dispatcher_##name##_func( \ const void *ctx, \ const struct bpf_insn *insnsi, \ bpf_func_t bpf_func); \ extern struct bpf_dispatcher bpf_dispatcher_##name; + #define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func #define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name) void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from, diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h index 9f6e25467844..444236dadcf0 100644 --- a/include/linux/fault-inject.h +++ b/include/linux/fault-inject.h @@ -20,7 +20,6 @@ struct fault_attr { atomic_t space; unsigned long verbose; bool task_filter; - bool no_warn; unsigned long stacktrace_depth; unsigned long require_start; unsigned long require_end; @@ -32,6 +31,10 @@ struct fault_attr { struct dentry *dname; };
+enum fault_flags { + FAULT_NOWARN = 1 << 0, +}; + #define FAULT_ATTR_INITIALIZER { \ .interval = 1, \ .times = ATOMIC_INIT(1), \ @@ -40,11 +43,11 @@ struct fault_attr { .ratelimit_state = RATELIMIT_STATE_INIT_DISABLED, \ .verbose = 2, \ .dname = NULL, \ - .no_warn = false, \ }
#define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER int setup_fault_attr(struct fault_attr *attr, char *str); +bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags); bool should_fail(struct fault_attr *attr, ssize_t size);
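should_fail_ex() takes warning suppression as a per-call flag instead of the attr->no_warn field removed above, so concurrent callers no longer toggle shared state. A hedged kernel-style sketch of a caller; only DECLARE_FAULT_ATTR(), FAULT_NOWARN and should_fail_ex() come from this header, the attribute and function names are made up:

#include <linux/fault-inject.h>
#include <linux/gfp.h>

static DECLARE_FAULT_ATTR(fail_my_alloc);

static bool my_should_fail(ssize_t size, gfp_t gfp)
{
	int flags = 0;

	/* Callers that must stay silent (e.g. __GFP_NOWARN contexts) ask
	 * fault injection not to print anything for this call only. */
	if (gfp & __GFP_NOWARN)
		flags |= FAULT_NOWARN;

	return should_fail_ex(&fail_my_alloc, size, flags);
}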
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS diff --git a/include/linux/fscache.h b/include/linux/fscache.h index 36e5dd84cf59..8e312c8323a8 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -75,7 +75,7 @@ struct fscache_volume { atomic_t n_accesses; /* Number of cache accesses in progress */ unsigned int debug_id; unsigned int key_hash; /* Hash of key string */ - char *key; /* Volume ID, eg. "afs@example.com@1234" */ + u8 *key; /* Volume ID, eg. "afs@example.com@1234" */ struct list_head proc_link; /* Link in /proc/fs/fscache/volumes */ struct hlist_bl_node hash_link; /* Link in hash table */ struct work_struct work; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 454dab40baf6..2d56cfe0911d 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -984,6 +984,7 @@ struct mlx5_async_work { struct mlx5_async_ctx *ctx; mlx5_async_cbk_t user_callback; u16 opcode; /* cmd opcode */ + u16 op_mod; /* cmd op_mod */ void *out; /* pointer to the cmd output buffer */ };
diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 3827a6b395fd..bce6b228cf56 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -83,7 +83,7 @@ struct neigh_parms { struct rcu_head rcu_head;
int reachable_time; - int qlen; + u32 qlen; int data[NEIGH_VAR_DATA_MAX]; DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX); }; diff --git a/include/sound/sof/info.h b/include/sound/sof/info.h index 65e86e4e9fd8..75193850ead0 100644 --- a/include/sound/sof/info.h +++ b/include/sound/sof/info.h @@ -36,6 +36,10 @@ enum sof_ipc_ext_data { SOF_IPC_EXT_USER_ABI_INFO = 4, };
+/* Build u32 number in format MMmmmppp */ +#define SOF_FW_VER(MAJOR, MINOR, PATCH) ((uint32_t)( \ + ((MAJOR) << 24) | ((MINOR) << 12) | (PATCH))) + /* FW version - SOF_IPC_GLB_VERSION */ struct sof_ipc_fw_version { struct sof_ipc_hdr hdr; diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h index 7c1dc818b1d5..d676ed2b246e 100644 --- a/include/uapi/linux/audit.h +++ b/include/uapi/linux/audit.h @@ -187,7 +187,7 @@ #define AUDIT_MAX_KEY_LEN 256 #define AUDIT_BITMASK_SIZE 64 #define AUDIT_WORD(nr) ((__u32)((nr)/32)) -#define AUDIT_BIT(nr) (1 << ((nr) - AUDIT_WORD(nr)*32)) +#define AUDIT_BIT(nr) (1U << ((nr) - AUDIT_WORD(nr)*32))
#define AUDIT_SYSCALL_CLASSES 16 #define AUDIT_CLASS_DIR_WRITE 0 diff --git a/init/Kconfig b/init/Kconfig index 532362fcfe31..d1d779d6ba43 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -76,7 +76,7 @@ config CC_HAS_ASM_GOTO_OUTPUT config CC_HAS_ASM_GOTO_TIED_OUTPUT depends on CC_HAS_ASM_GOTO_OUTPUT # Detect buggy gcc and clang, fixed in gcc-11 clang-14. - def_bool $(success,echo 'int foo(int *x) { asm goto (".long (%l[bar]) - .\n": "+m"(*x) ::: bar); return *x; bar: return 0; }' | $CC -x c - -c -o /dev/null) + def_bool $(success,echo 'int foo(int *x) { asm goto (".long (%l[bar]) - .": "+m"(*x) ::: bar); return *x; bar: return 0; }' | $CC -x c - -c -o /dev/null)
config TOOLS_SUPPORT_RELR def_bool $(success,env "CC=$(CC)" "LD=$(LD)" "NM=$(NM)" "OBJCOPY=$(OBJCOPY)" $(srctree)/scripts/tools-support-relr.sh) diff --git a/io_uring/filetable.c b/io_uring/filetable.c index 7b473259f3f4..68dfc6936aa7 100644 --- a/io_uring/filetable.c +++ b/io_uring/filetable.c @@ -101,8 +101,6 @@ static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file, err: if (needs_switch) io_rsrc_node_switch(ctx, ctx->file_data); - if (ret) - fput(file); return ret; }
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h index 1b552d213763..585f97b31a93 100644 --- a/io_uring/io_uring.h +++ b/io_uring/io_uring.h @@ -229,9 +229,14 @@ static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
static inline bool io_run_task_work(void) { + /* + * Always check-and-clear the task_work notification signal. With how + * signaling works for task_work, we can find it set with nothing to + * run. We need to clear it for that case, like get_signal() does. + */ + if (test_thread_flag(TIF_NOTIFY_SIGNAL)) + clear_notify_signal(); if (task_work_pending(current)) { - if (test_thread_flag(TIF_NOTIFY_SIGNAL)) - clear_notify_signal(); __set_current_state(TASK_RUNNING); task_work_run(); return 1; diff --git a/io_uring/poll.c b/io_uring/poll.c index ba0f68466930..d9bf1767867e 100644 --- a/io_uring/poll.c +++ b/io_uring/poll.c @@ -40,7 +40,14 @@ struct io_poll_table { };
#define IO_POLL_CANCEL_FLAG BIT(31) -#define IO_POLL_REF_MASK GENMASK(30, 0) +#define IO_POLL_RETRY_FLAG BIT(30) +#define IO_POLL_REF_MASK GENMASK(29, 0) + +/* + * We usually have 1-2 refs taken, 128 is more than enough and we want to + * maximise the margin between this amount and the moment when it overflows. + */ +#define IO_POLL_REF_BIAS 128
#define IO_WQE_F_DOUBLE 1
@@ -58,6 +65,21 @@ static inline bool wqe_is_double(struct wait_queue_entry *wqe) return priv & IO_WQE_F_DOUBLE; }
+static bool io_poll_get_ownership_slowpath(struct io_kiocb *req) +{ + int v; + + /* + * poll_refs are already elevated and we don't have much hope for + * grabbing the ownership. Instead of incrementing set a retry flag + * to notify the loop that there might have been some change. + */ + v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs); + if (v & IO_POLL_REF_MASK) + return false; + return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK); +} + /* * If refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, it's free. We can * bump it and acquire ownership. It's disallowed to modify requests while not @@ -66,6 +88,8 @@ static inline bool wqe_is_double(struct wait_queue_entry *wqe) */ static inline bool io_poll_get_ownership(struct io_kiocb *req) { + if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS)) + return io_poll_get_ownership_slowpath(req); return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK); }
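The poll_refs word above packs an ownership count into bits 0-29, a retry flag into bit 30 and a cancel flag into bit 31, and switches to a slow path once the count reaches IO_POLL_REF_BIAS so the counter cannot creep into the flag bits. A simplified userspace model of that layout with C11 atomics (names and layout constants here are illustrative only):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define POLL_CANCEL_FLAG (1U << 31)
#define POLL_RETRY_FLAG  (1U << 30)
#define POLL_REF_MASK    ((1U << 30) - 1)	/* bits 29..0 */
#define POLL_REF_BIAS    128U

static _Atomic unsigned int poll_refs;

/* Take ownership iff nobody else currently holds a reference. */
static bool get_ownership(void)
{
	if (atomic_load(&poll_refs) >= POLL_REF_BIAS) {
		/* Counter is suspiciously high: record that something changed
		 * instead of inflating it further. */
		if (atomic_fetch_or(&poll_refs, POLL_RETRY_FLAG) & POLL_REF_MASK)
			return false;
	}
	return !(atomic_fetch_add(&poll_refs, 1) & POLL_REF_MASK);
}

int main(void)
{
	printf("first caller owns it: %d\n", get_ownership());  /* 1 */
	printf("second caller owns it: %d\n", get_ownership()); /* 0 */
	return 0;
}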
@@ -116,6 +140,8 @@ static void io_poll_req_insert_locked(struct io_kiocb *req) struct io_hash_table *table = &req->ctx->cancel_table_locked; u32 index = hash_long(req->cqe.user_data, table->hash_bits);
+ lockdep_assert_held(&req->ctx->uring_lock); + hlist_add_head(&req->hash_node, &table->hbs[index].list); }
@@ -233,6 +259,16 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked) */ if ((v & IO_POLL_REF_MASK) != 1) req->cqe.res = 0; + if (v & IO_POLL_RETRY_FLAG) { + req->cqe.res = 0; + /* + * We won't find new events that came in between + * vfs_poll and the ref put unless we clear the flag + * in advance. + */ + atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs); + v &= ~IO_POLL_RETRY_FLAG; + }
/* the mask was stashed in __io_poll_execute */ if (!req->cqe.res) { @@ -272,7 +308,8 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked) * Release all references, retry if someone tried to restart * task_work while we were executing it. */ - } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs)); + } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs) & + IO_POLL_REF_MASK);
return IOU_POLL_NO_ACTION; } @@ -516,7 +553,6 @@ static int __io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags) { struct io_ring_ctx *ctx = req->ctx; - int v;
INIT_HLIST_NODE(&req->hash_node); req->work.cancel_seq = atomic_read(&ctx->cancel_seq); @@ -584,11 +620,10 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
if (ipt->owning) { /* - * Release ownership. If someone tried to queue a tw while it was - * locked, kick it off for them. + * Try to release ownership. If we see a change of state, e.g. + * poll was waken up, queue up a tw, it'll deal with it. */ - v = atomic_dec_return(&req->poll_refs); - if (unlikely(v & IO_POLL_REF_MASK)) + if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1) __io_poll_execute(req, 0); } return 0; diff --git a/kernel/bpf/dispatcher.c b/kernel/bpf/dispatcher.c index fa64b80b8bca..c19719f48ce0 100644 --- a/kernel/bpf/dispatcher.c +++ b/kernel/bpf/dispatcher.c @@ -4,6 +4,7 @@ #include <linux/hash.h> #include <linux/bpf.h> #include <linux/filter.h> +#include <linux/static_call.h>
if (ipt->owning) { /* - * Release ownership. If someone tried to queue a tw while it was - * locked, kick it off for them. + * Try to release ownership. If we see a change of state, e.g. + * poll was woken up, queue up a tw, it'll deal with it. */ - v = atomic_dec_return(&req->poll_refs); - if (unlikely(v & IO_POLL_REF_MASK)) + if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1) __io_poll_execute(req, 0); } return 0; diff --git a/kernel/bpf/dispatcher.c index fa64b80b8bca..c19719f48ce0 100644 --- a/kernel/bpf/dispatcher.c +++ b/kernel/bpf/dispatcher.c @@ -4,6 +4,7 @@ #include <linux/hash.h> #include <linux/bpf.h> #include <linux/filter.h> +#include <linux/static_call.h>
/* The BPF dispatcher is a multiway branch code generator. The * dispatcher is a mechanism to avoid the performance penalty of an @@ -104,17 +105,11 @@ static int bpf_dispatcher_prepare(struct bpf_dispatcher *d, void *image, void *b
static void bpf_dispatcher_update(struct bpf_dispatcher *d, int prev_num_progs) { - void *old, *new, *tmp; - u32 noff; - int err; - - if (!prev_num_progs) { - old = NULL; - noff = 0; - } else { - old = d->image + d->image_off; + void *new, *tmp; + u32 noff = 0; + + if (prev_num_progs) noff = d->image_off ^ (PAGE_SIZE / 2); - }
new = d->num_progs ? d->image + noff : NULL; tmp = d->num_progs ? d->rw_image + noff : NULL; @@ -128,11 +123,10 @@ static void bpf_dispatcher_update(struct bpf_dispatcher *d, int prev_num_progs) return; }
- err = bpf_arch_text_poke(d->func, BPF_MOD_JUMP, old, new); - if (err || !new) - return; + __BPF_DISPATCHER_UPDATE(d, new ?: (void *)&bpf_dispatcher_nop_func);
- d->image_off = noff; + if (new) + d->image_off = noff; }
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from, diff --git a/kernel/gcov/clang.c b/kernel/gcov/clang.c index cbb0bed958ab..7670a811a565 100644 --- a/kernel/gcov/clang.c +++ b/kernel/gcov/clang.c @@ -280,6 +280,8 @@ void gcov_info_add(struct gcov_info *dst, struct gcov_info *src)
for (i = 0; i < sfn_ptr->num_counters; i++) dfn_ptr->counters[i] += sfn_ptr->counters[i]; + + sfn_ptr = list_next_entry(sfn_ptr, head); } }
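The gcov fix above advances sfn_ptr every time dfn_ptr advances, so the two per-function counter lists are walked in lockstep; previously every destination function accumulated the first source function's counters. The same pattern in a standalone form, with plain arrays standing in for the kernel's linked lists (names are illustrative):

#include <stdio.h>

struct fn_counters {
	int num_counters;
	long counters[4];
};

/* Add each source function's counters into the matching destination one. */
void gcov_add(struct fn_counters *dst, const struct fn_counters *src, int nfuncs)
{
	for (int f = 0; f < nfuncs; f++) {
		for (int i = 0; i < src[f].num_counters; i++)
			dst[f].counters[i] += src[f].counters[i];
		/* advancing f here is the lockstep the original loop missed */
	}
}

int main(void)
{
	struct fn_counters dst[2] = { { 2, {1, 1} }, { 2, {0, 0} } };
	struct fn_counters src[2] = { { 2, {5, 5} }, { 2, {7, 7} } };

	gcov_add(dst, src, 2);
	printf("%ld %ld\n", dst[0].counters[0], dst[1].counters[0]); /* 6 7 */
	return 0;
}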
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index cb131fad117c..997d23641448 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2095,6 +2095,7 @@ config KPROBES_SANITY_TEST depends on DEBUG_KERNEL depends on KPROBES depends on KUNIT + select STACKTRACE if ARCH_CORRECT_STACKTRACE_ON_KRETPROBE default KUNIT_ALL_TESTS help This option provides for testing basic kprobes functionality on diff --git a/lib/fault-inject.c b/lib/fault-inject.c index 423784d9c058..70768d8a2200 100644 --- a/lib/fault-inject.c +++ b/lib/fault-inject.c @@ -41,9 +41,6 @@ EXPORT_SYMBOL_GPL(setup_fault_attr);
static void fail_dump(struct fault_attr *attr) { - if (attr->no_warn) - return; - if (attr->verbose > 0 && __ratelimit(&attr->ratelimit_state)) { printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n" "name %pd, interval %lu, probability %lu, " @@ -103,7 +100,7 @@ static inline bool fail_stacktrace(struct fault_attr *attr) * http://www.nongnu.org/failmalloc/ */
-bool should_fail(struct fault_attr *attr, ssize_t size) +bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags) { if (in_task()) { unsigned int fail_nth = READ_ONCE(current->fail_nth); @@ -146,13 +143,19 @@ bool should_fail(struct fault_attr *attr, ssize_t size) return false;
fail: - fail_dump(attr); + if (!(flags & FAULT_NOWARN)) + fail_dump(attr);
if (atomic_read(&attr->times) != -1) atomic_dec_not_zero(&attr->times);
return true; } + +bool should_fail(struct fault_attr *attr, ssize_t size) +{ + return should_fail_ex(attr, size, 0); +} EXPORT_SYMBOL_GPL(should_fail);
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS diff --git a/lib/vdso/Makefile b/lib/vdso/Makefile index c415a685d61b..e814061d6aa0 100644 --- a/lib/vdso/Makefile +++ b/lib/vdso/Makefile @@ -17,6 +17,6 @@ $(error ARCH_REL_TYPE_ABS is not set) endif
quiet_cmd_vdso_check = VDSOCHK $@ - cmd_vdso_check = if $(OBJDUMP) -R $@ | egrep -h "$(ARCH_REL_TYPE_ABS)"; \ + cmd_vdso_check = if $(OBJDUMP) -R $@ | grep -E -h "$(ARCH_REL_TYPE_ABS)"; \ then (echo >&2 "$@: dynamic relocations are not supported"; \ rm -f $@; /bin/false); fi diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c index bdef9682d0a0..b4b9614eecbe 100644 --- a/mm/damon/sysfs.c +++ b/mm/damon/sysfs.c @@ -2342,6 +2342,10 @@ static int damon_sysfs_upd_schemes_stats(struct damon_sysfs_kdamond *kdamond) damon_for_each_scheme(scheme, ctx) { struct damon_sysfs_stats *sysfs_stats;
+ /* user could have removed the scheme sysfs dir */ + if (schemes_idx >= sysfs_schemes->nr) + break; + sysfs_stats = sysfs_schemes->schemes_arr[schemes_idx++]->stats; sysfs_stats->nr_tried = scheme->stat.nr_tried; sysfs_stats->sz_tried = scheme->stat.sz_tried; diff --git a/mm/failslab.c b/mm/failslab.c index 58df9789f1d2..ffc420c0e767 100644 --- a/mm/failslab.c +++ b/mm/failslab.c @@ -16,6 +16,8 @@ static struct {
bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags) { + int flags = 0; + /* No fault-injection for bootstrap cache */ if (unlikely(s == kmem_cache)) return false; @@ -30,10 +32,16 @@ bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags) if (failslab.cache_filter && !(s->flags & SLAB_FAILSLAB)) return false;
+ /* + * In some cases, callers specify __GFP_NOWARN + * to avoid printing any information (not just a warning), + * thus avoiding deadlocks. See commit 6b9dbedbe349 for + * details. + */ if (gfpflags & __GFP_NOWARN) - failslab.attr.no_warn = true; + flags |= FAULT_NOWARN;
- return should_fail(&failslab.attr, s->object_size); + return should_fail_ex(&failslab.attr, s->object_size, flags); }
static int __init setup_failslab(char *str) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index b69979c9ced5..6a95ea7c5ee7 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2971,7 +2971,7 @@ struct obj_cgroup *get_obj_cgroup_from_page(struct page *page) { struct obj_cgroup *objcg;
- if (!memcg_kmem_enabled() || memcg_kmem_bypass()) + if (!memcg_kmem_enabled()) return NULL;
if (PageMemcgKmem(page)) { diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 19749f5409e7..5d9b98774a82 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3883,6 +3883,8 @@ __setup("fail_page_alloc=", setup_fail_page_alloc);
static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) { + int flags = 0; + if (order < fail_page_alloc.min_order) return false; if (gfp_mask & __GFP_NOFAIL) @@ -3893,10 +3895,11 @@ static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) (gfp_mask & __GFP_DIRECT_RECLAIM)) return false;
+ /* See comment in __should_failslab() */ if (gfp_mask & __GFP_NOWARN) - fail_page_alloc.attr.no_warn = true; + flags |= FAULT_NOWARN;
- return should_fail(&fail_page_alloc.attr, 1 << order); + return should_fail_ex(&fail_page_alloc.attr, 1 << order, flags); }
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS diff --git a/mm/vmscan.c b/mm/vmscan.c index 382dbe97329f..a69f32dd5ce5 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2472,8 +2472,20 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, * the flushers simply cannot keep up with the allocation * rate. Nudge the flusher threads in case they are asleep. */ - if (stat.nr_unqueued_dirty == nr_taken) + if (stat.nr_unqueued_dirty == nr_taken) { wakeup_flusher_threads(WB_REASON_VMSCAN); + /* + * For cgroupv1 dirty throttling is achieved by waking up + * the kernel flusher here and later waiting on folios + * which are in writeback to finish (see shrink_folio_list()). + * + * Flusher may not be able to issue writeback quickly + * enough for cgroupv1 writeback throttling to work + * on a large system. + */ + if (!writeback_throttling_sane(sc)) + reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK); + }
sc->nr.dirty += stat.nr_dirty; sc->nr.congested += stat.nr_congested; @@ -2955,8 +2967,8 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) enum lru_list lru; unsigned long nr_reclaimed = 0; unsigned long nr_to_reclaim = sc->nr_to_reclaim; + bool proportional_reclaim; struct blk_plug plug; - bool scan_adjusted;
get_scan_count(lruvec, sc, nr);
@@ -2974,8 +2986,8 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) * abort proportional reclaim if either the file or anon lru has already * dropped to zero at the first pass. */ - scan_adjusted = (!cgroup_reclaim(sc) && !current_is_kswapd() && - sc->priority == DEF_PRIORITY); + proportional_reclaim = (!cgroup_reclaim(sc) && !current_is_kswapd() && + sc->priority == DEF_PRIORITY);
blk_start_plug(&plug); while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || @@ -2995,7 +3007,7 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
cond_resched();
- if (nr_reclaimed < nr_to_reclaim || scan_adjusted) + if (nr_reclaimed < nr_to_reclaim || proportional_reclaim) continue;
/* @@ -3046,8 +3058,6 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) nr_scanned = targets[lru] - nr[lru]; nr[lru] = targets[lru] * (100 - percentage) / 100; nr[lru] -= min(nr[lru], nr_scanned); - - scan_adjusted = true; } blk_finish_plug(&plug); sc->nr_reclaimed += nr_reclaimed; diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index 0191f22d1ec3..8487321c1fc7 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@ -202,9 +202,11 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) { list_move(&req->req_list, &cancel_list); + req->status = REQ_STATUS_ERROR; } list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) { list_move(&req->req_list, &cancel_list); + req->status = REQ_STATUS_ERROR; }
spin_unlock(&m->req_lock); diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 7105529abb0f..c433b1fb961a 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -272,7 +272,7 @@ skb_flow_dissect_ct(const struct sk_buff *skb, key->ct_zone = ct->zone.id; #endif #if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) - key->ct_mark = ct->mark; + key->ct_mark = READ_ONCE(ct->mark); #endif
cl = nf_ct_labels_find(ct); diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 84755db81e9d..35f5a3125808 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -307,7 +307,31 @@ static int neigh_del_timer(struct neighbour *n) return 0; }
-static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net) +static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev, + int family) +{ + switch (family) { + case AF_INET: + return __in_dev_arp_parms_get_rcu(dev); + case AF_INET6: + return __in6_dev_nd_parms_get_rcu(dev); + } + return NULL; +} + +static void neigh_parms_qlen_dec(struct net_device *dev, int family) +{ + struct neigh_parms *p; + + rcu_read_lock(); + p = neigh_get_dev_parms_rcu(dev, family); + if (p) + p->qlen--; + rcu_read_unlock(); +} + +static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net, + int family) { struct sk_buff_head tmp; unsigned long flags; @@ -321,13 +345,7 @@ static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net) struct net_device *dev = skb->dev;
if (net == NULL || net_eq(dev_net(dev), net)) { - struct in_device *in_dev; - - rcu_read_lock(); - in_dev = __in_dev_get_rcu(dev); - if (in_dev) - in_dev->arp_parms->qlen--; - rcu_read_unlock(); + neigh_parms_qlen_dec(dev, family); __skb_unlink(skb, list); __skb_queue_tail(&tmp, skb); } @@ -409,7 +427,8 @@ static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev, write_lock_bh(&tbl->lock); neigh_flush_dev(tbl, dev, skip_perm); pneigh_ifdown_and_unlock(tbl, dev); - pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL); + pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL, + tbl->family); if (skb_queue_empty_lockless(&tbl->proxy_queue)) del_timer_sync(&tbl->proxy_timer); return 0; @@ -1621,13 +1640,8 @@ static void neigh_proxy_process(struct timer_list *t)
if (tdif <= 0) { struct net_device *dev = skb->dev; - struct in_device *in_dev;
- rcu_read_lock(); - in_dev = __in_dev_get_rcu(dev); - if (in_dev) - in_dev->arp_parms->qlen--; - rcu_read_unlock(); + neigh_parms_qlen_dec(dev, tbl->family); __skb_unlink(skb, &tbl->proxy_queue);
if (tbl->proxy_redo && netif_running(dev)) { @@ -1821,7 +1835,7 @@ int neigh_table_clear(int index, struct neigh_table *tbl) cancel_delayed_work_sync(&tbl->managed_work); cancel_delayed_work_sync(&tbl->gc_work); del_timer_sync(&tbl->proxy_timer); - pneigh_queue_purge(&tbl->proxy_queue, NULL); + pneigh_queue_purge(&tbl->proxy_queue, NULL, tbl->family); neigh_ifdown(tbl, NULL); if (atomic_read(&tbl->entries)) pr_crit("neighbour leakage\n"); @@ -3542,18 +3556,6 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write, return ret; }
-static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev, - int family) -{ - switch (family) { - case AF_INET: - return __in_dev_arp_parms_get_rcu(dev); - case AF_INET6: - return __in6_dev_nd_parms_get_rcu(dev); - } - return NULL; -} - static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p, int index) { diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index da6e3b20cd75..60379ad7ae06 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -136,6 +136,8 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) * This unhashes the socket and releases the local port, if necessary. */ dccp_set_state(sk, DCCP_CLOSED); + if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) + inet_reset_saddr(sk); ip_rt_put(rt); sk->sk_route_caps = 0; inet->inet_dport = 0; diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index fd44638ec16b..f9ed81a0ddbb 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -967,6 +967,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
late_failure: dccp_set_state(sk, DCCP_CLOSED); + if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) + inet_reset_saddr(sk); __sk_dst_reset(sk); failure: inet->inet_dport = 0; diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index e983bb0c5012..2dfb12230f08 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -402,6 +402,16 @@ config INET_IPCOMP
If unsure, say Y.
+config INET_TABLE_PERTURB_ORDER + int "INET: Source port perturbation table size (as power of 2)" if EXPERT + default 16 + help + Source port perturbation table size (as power of 2) for + RFC 6056 3.3.4. Algorithm 4: Double-Hash Port Selection Algorithm. + + The default is almost always what you want. + Only change this if you know what you are doing. + config INET_XFRM_TUNNEL tristate select INET_TUNNEL diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c index 170152772d33..3969fa805679 100644 --- a/net/ipv4/esp4_offload.c +++ b/net/ipv4/esp4_offload.c @@ -314,6 +314,9 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_ xo->seq.low += skb_shinfo(skb)->gso_segs; }
+ if (xo->seq.low < seq) + xo->seq.hi++; + esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));
ip_hdr(skb)->tot_len = htons(skb->len); diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 452ff177e4da..f26d5ac117d6 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1381,8 +1381,10 @@ int fib_table_insert(struct net *net, struct fib_table *tb,
/* The alias was already inserted, so the node must exist. */ l = l ? l : fib_find_node(t, &tp, key); - if (WARN_ON_ONCE(!l)) + if (WARN_ON_ONCE(!l)) { + err = -ENOENT; goto out_free_new_fa; + }
if (fib_find_alias(&l->leaf, new_fa->fa_slen, 0, 0, tb->tb_id, true) == new_fa) { diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index f5950a7172d6..1e45fe6276f7 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -679,13 +679,13 @@ EXPORT_SYMBOL_GPL(inet_unhash); * Note that we use 32bit integers (vs RFC 'short integers') * because 2^16 is not a multiple of num_ephemeral and this * property might be used by clever attacker. + * * RFC claims using TABLE_LENGTH=10 buckets gives an improvement, though - * attacks were since demonstrated, thus we use 65536 instead to really - * give more isolation and privacy, at the expense of 256kB of kernel - * memory. + * attacks were since demonstrated, thus we use 65536 by default instead + * to really give more isolation and privacy, at the expense of 256kB + * of kernel memory. */ -#define INET_TABLE_PERTURB_SHIFT 16 -#define INET_TABLE_PERTURB_SIZE (1 << INET_TABLE_PERTURB_SHIFT) +#define INET_TABLE_PERTURB_SIZE (1 << CONFIG_INET_TABLE_PERTURB_ORDER) static u32 *table_perturb;
int __inet_hash_connect(struct inet_timewait_death_row *death_row, diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 1b512390b3cf..e880ce77322a 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -366,6 +366,11 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk, iph->tos, dev); if (unlikely(err)) goto drop_error; + } else { + struct in_device *in_dev = __in_dev_get_rcu(dev); + + if (in_dev && IN_DEV_ORCONF(in_dev, NOPOLICY)) + IPCB(skb)->flags |= IPSKB_NOPOLICY; }
#ifdef CONFIG_IP_ROUTE_CLASSID diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index f8e176c77d1c..b3cc416ed292 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -435,7 +435,7 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
switch (ctinfo) { case IP_CT_NEW: - ct->mark = hash; + WRITE_ONCE(ct->mark, hash); break; case IP_CT_RELATED: case IP_CT_RELATED_REPLY: @@ -452,7 +452,7 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par) #ifdef DEBUG nf_ct_dump_tuple_ip(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); #endif - pr_debug("hash=%u ct_hash=%u ", hash, ct->mark); + pr_debug("hash=%u ct_hash=%u ", hash, READ_ONCE(ct->mark)); if (!clusterip_responsible(cipinfo->config, hash)) { pr_debug("not responsible\n"); return NF_DROP; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index fe9a6022db66..ef8013e2134f 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -323,6 +323,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) * if necessary. */ tcp_set_state(sk, TCP_CLOSE); + if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) + inet_reset_saddr(sk); ip_rt_put(rt); sk->sk_route_caps = 0; inet->inet_dport = 0; diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c index 79d43548279c..242f4295940e 100644 --- a/net/ipv6/esp6_offload.c +++ b/net/ipv6/esp6_offload.c @@ -346,6 +346,9 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features xo->seq.low += skb_shinfo(skb)->gso_segs; }
+ if (xo->seq.low < seq) + xo->seq.hi++; + esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
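Both ESP offload hunks detect a wrap of the 32-bit low half of the sequence number after it has been advanced by gso_segs, and carry into the high half before composing the 64-bit value. The same carry logic in a standalone form:

#include <stdio.h>
#include <stdint.h>

/* Advance a split 64-bit sequence number by `segs`, carrying on wrap. */
uint64_t advance_seq(uint32_t *hi, uint32_t *low, uint32_t segs)
{
	uint32_t before = *low;

	*low += segs;
	if (*low < before)	/* the 32-bit addition wrapped */
		(*hi)++;

	return ((uint64_t)*hi << 32) | *low;
}

int main(void)
{
	uint32_t hi = 0, low = 0xfffffffe;

	printf("%#llx\n", (unsigned long long)advance_seq(&hi, &low, 4));
	/* prints 0x100000002: the carry landed in the high word */
	return 0;
}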
len = skb->len - sizeof(struct ipv6hdr); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index e54eee80ce5f..5516cfb96c48 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -340,6 +340,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
late_failure: tcp_set_state(sk, TCP_CLOSE); + if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) + inet_reset_saddr(sk); failure: inet->inet_dport = 0; sk->sk_route_caps = 0; diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 4a4b0e49ec92..ea435eba3053 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -287,9 +287,13 @@ int __init xfrm6_init(void) if (ret) goto out_state;
- register_pernet_subsys(&xfrm6_net_ops); + ret = register_pernet_subsys(&xfrm6_net_ops); + if (ret) + goto out_protocol; out: return ret; +out_protocol: + xfrm6_protocol_fini(); out_state: xfrm6_state_fini(); out_policy: diff --git a/net/key/af_key.c b/net/key/af_key.c index c85df5b958d2..95edcbedf6ef 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -2905,7 +2905,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t) break; if (!aalg->pfkey_supported) continue; - if (aalg_tmpl_set(t, aalg) && aalg->available) + if (aalg_tmpl_set(t, aalg)) sz += sizeof(struct sadb_comb); } return sz + sizeof(struct sadb_prop); @@ -2923,7 +2923,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t) if (!ealg->pfkey_supported) continue;
- if (!(ealg_tmpl_set(t, ealg) && ealg->available)) + if (!(ealg_tmpl_set(t, ealg))) continue;
for (k = 1; ; k++) { @@ -2934,16 +2934,17 @@ static int count_esp_combs(const struct xfrm_tmpl *t) if (!aalg->pfkey_supported) continue;
- if (aalg_tmpl_set(t, aalg) && aalg->available) + if (aalg_tmpl_set(t, aalg)) sz += sizeof(struct sadb_comb); } } return sz + sizeof(struct sadb_prop); }
-static void dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t) +static int dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t) { struct sadb_prop *p; + int sz = 0; int i;
p = skb_put(skb, sizeof(struct sadb_prop)); @@ -2971,13 +2972,17 @@ static void dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t) c->sadb_comb_soft_addtime = 20*60*60; c->sadb_comb_hard_usetime = 8*60*60; c->sadb_comb_soft_usetime = 7*60*60; + sz += sizeof(*c); } } + + return sz + sizeof(*p); }
-static void dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t) +static int dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t) { struct sadb_prop *p; + int sz = 0; int i, k;
p = skb_put(skb, sizeof(struct sadb_prop)); @@ -3019,8 +3024,11 @@ static void dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t) c->sadb_comb_soft_addtime = 20*60*60; c->sadb_comb_hard_usetime = 8*60*60; c->sadb_comb_soft_usetime = 7*60*60; + sz += sizeof(*c); } } + + return sz + sizeof(*p); }
static int key_notify_policy_expire(struct xfrm_policy *xp, const struct km_event *c) @@ -3150,6 +3158,7 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct struct sadb_x_sec_ctx *sec_ctx; struct xfrm_sec_ctx *xfrm_ctx; int ctx_size = 0; + int alg_size = 0;
sockaddr_size = pfkey_sockaddr_size(x->props.family); if (!sockaddr_size) @@ -3161,16 +3170,16 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct sizeof(struct sadb_x_policy);
if (x->id.proto == IPPROTO_AH) - size += count_ah_combs(t); + alg_size = count_ah_combs(t); else if (x->id.proto == IPPROTO_ESP) - size += count_esp_combs(t); + alg_size = count_esp_combs(t);
if ((xfrm_ctx = x->security)) { ctx_size = PFKEY_ALIGN8(xfrm_ctx->ctx_len); size += sizeof(struct sadb_x_sec_ctx) + ctx_size; }
- skb = alloc_skb(size + 16, GFP_ATOMIC); + skb = alloc_skb(size + alg_size + 16, GFP_ATOMIC); if (skb == NULL) return -ENOMEM;
@@ -3224,10 +3233,13 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct pol->sadb_x_policy_priority = xp->priority;
/* Set sadb_comb's. */ + alg_size = 0; if (x->id.proto == IPPROTO_AH) - dump_ah_combs(skb, t); + alg_size = dump_ah_combs(skb, t); else if (x->id.proto == IPPROTO_ESP) - dump_esp_combs(skb, t); + alg_size = dump_esp_combs(skb, t); + + hdr->sadb_msg_len += alg_size / 8;
/* security context */ if (xfrm_ctx) { @@ -3382,7 +3394,7 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, hdr->sadb_msg_len = size / sizeof(uint64_t); hdr->sadb_msg_errno = 0; hdr->sadb_msg_reserved = 0; - hdr->sadb_msg_seq = x->km.seq = get_acqseq(); + hdr->sadb_msg_seq = x->km.seq; hdr->sadb_msg_pid = 0;
/* SA */ diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 5b1c47ed0cc0..87e24bba4c67 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -1437,8 +1437,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) ieee80211_led_exit(local); destroy_workqueue(local->workqueue); fail_workqueue: - if (local->wiphy_ciphers_allocated) + if (local->wiphy_ciphers_allocated) { kfree(local->hw.wiphy->cipher_suites); + local->wiphy_ciphers_allocated = false; + } kfree(local->int_scan_req); return result; } @@ -1506,8 +1508,10 @@ void ieee80211_free_hw(struct ieee80211_hw *hw) mutex_destroy(&local->iflist_mtx); mutex_destroy(&local->mtx);
- if (local->wiphy_ciphers_allocated) + if (local->wiphy_ciphers_allocated) { kfree(local->hw.wiphy->cipher_suites); + local->wiphy_ciphers_allocated = false; + }
idr_for_each(&local->ack_status_frames, ieee80211_free_ack_frame, NULL); diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index acc1c299f1ae..69d5e1ec6ede 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -710,7 +710,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath) void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { - kfree_skb(skb); + ieee80211_free_txskb(&sdata->local->hw, skb); sdata->u.mesh.mshstats.dropped_frames_no_route++; }
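The mac80211 hunks free local->hw.wiphy->cipher_suites on both the registration error path and in ieee80211_free_hw, so the allocation flag is now cleared after the first kfree() to keep the second path from freeing the same buffer again. The general guard, as a userspace sketch with illustrative names:

#include <stdlib.h>
#include <stdbool.h>

struct local {
	unsigned int *cipher_suites;
	bool ciphers_allocated;
};

void free_ciphers(struct local *l)
{
	/* Safe to call from both the error path and the final teardown:
	 * the flag and pointer are cleared after the first free. */
	if (l->ciphers_allocated) {
		free(l->cipher_suites);
		l->cipher_suites = NULL;
		l->ciphers_allocated = false;
	}
}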
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h index 3adc291d9ce1..7499192af586 100644 --- a/net/netfilter/ipset/ip_set_hash_gen.h +++ b/net/netfilter/ipset/ip_set_hash_gen.h @@ -916,7 +916,7 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext, #ifdef IP_SET_HASH_WITH_MULTI if (h->bucketsize >= AHASH_MAX_TUNED) goto set_full; - else if (h->bucketsize < multi) + else if (h->bucketsize <= multi) h->bucketsize += AHASH_INIT_SIZE; #endif if (n->size >= AHASH_MAX(h)) { diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c index dd30c03d5a23..75d556d71652 100644 --- a/net/netfilter/ipset/ip_set_hash_ip.c +++ b/net/netfilter/ipset/ip_set_hash_ip.c @@ -151,18 +151,16 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], if (((u64)ip_to - ip + 1) >> (32 - h->netmask) > IPSET_MAX_RANGE) return -ERANGE;
- if (retried) { + if (retried) ip = ntohl(h->next.ip); - e.ip = htonl(ip); - } for (; ip <= ip_to;) { + e.ip = htonl(ip); ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) return ret;
ip += hosts; - e.ip = htonl(ip); - if (e.ip == 0) + if (ip == 0) return 0;
ret = 0; diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 8f261cd5b3a5..60289c074eef 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1781,7 +1781,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl, }
#ifdef CONFIG_NF_CONNTRACK_MARK - ct->mark = exp->master->mark; + ct->mark = READ_ONCE(exp->master->mark); #endif #ifdef CONFIG_NF_CONNTRACK_SECMARK ct->secmark = exp->master->secmark; diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 7562b215b932..d71150a40fb0 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -328,9 +328,9 @@ ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct) }
#ifdef CONFIG_NF_CONNTRACK_MARK -static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct) +static int ctnetlink_dump_mark(struct sk_buff *skb, u32 mark) { - if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark))) + if (nla_put_be32(skb, CTA_MARK, htonl(mark))) goto nla_put_failure; return 0;
@@ -543,7 +543,7 @@ static int ctnetlink_dump_extinfo(struct sk_buff *skb, static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct) { if (ctnetlink_dump_status(skb, ct) < 0 || - ctnetlink_dump_mark(skb, ct) < 0 || + ctnetlink_dump_mark(skb, READ_ONCE(ct->mark)) < 0 || ctnetlink_dump_secctx(skb, ct) < 0 || ctnetlink_dump_id(skb, ct) < 0 || ctnetlink_dump_use(skb, ct) < 0 || @@ -722,6 +722,7 @@ ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item) struct sk_buff *skb; unsigned int type; unsigned int flags = 0, group; + u32 mark; int err;
if (events & (1 << IPCT_DESTROY)) { @@ -826,8 +827,9 @@ ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item) }
#ifdef CONFIG_NF_CONNTRACK_MARK - if ((events & (1 << IPCT_MARK) || ct->mark) - && ctnetlink_dump_mark(skb, ct) < 0) + mark = READ_ONCE(ct->mark); + if ((events & (1 << IPCT_MARK) || mark) && + ctnetlink_dump_mark(skb, mark) < 0) goto nla_put_failure; #endif nlmsg_end(skb, nlh); @@ -1154,7 +1156,7 @@ static int ctnetlink_filter_match(struct nf_conn *ct, void *data) }
#ifdef CONFIG_NF_CONNTRACK_MARK - if ((ct->mark & filter->mark.mask) != filter->mark.val) + if ((READ_ONCE(ct->mark) & filter->mark.mask) != filter->mark.val) goto ignore_entry; #endif status = (u32)READ_ONCE(ct->status); @@ -2002,9 +2004,9 @@ static void ctnetlink_change_mark(struct nf_conn *ct, mask = ~ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
mark = ntohl(nla_get_be32(cda[CTA_MARK])); - newmark = (ct->mark & mask) ^ mark; - if (newmark != ct->mark) - ct->mark = newmark; + newmark = (READ_ONCE(ct->mark) & mask) ^ mark; + if (newmark != READ_ONCE(ct->mark)) + WRITE_ONCE(ct->mark, newmark); } #endif
@@ -2669,6 +2671,7 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct) { const struct nf_conntrack_zone *zone; struct nlattr *nest_parms; + u32 mark;
zone = nf_ct_zone(ct);
@@ -2730,7 +2733,8 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct) goto nla_put_failure;
#ifdef CONFIG_NF_CONNTRACK_MARK - if (ct->mark && ctnetlink_dump_mark(skb, ct) < 0) + mark = READ_ONCE(ct->mark); + if (mark && ctnetlink_dump_mark(skb, mark) < 0) goto nla_put_failure; #endif if (ctnetlink_dump_labels(skb, ct) < 0) diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 4ffe84c5a82c..bca839ab1ae8 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -366,7 +366,7 @@ static int ct_seq_show(struct seq_file *s, void *v) goto release;
#if defined(CONFIG_NF_CONNTRACK_MARK) - seq_printf(s, "mark=%u ", ct->mark); + seq_printf(s, "mark=%u ", READ_ONCE(ct->mark)); #endif
ct_show_secctx(s, ct); diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c index b04645ced89b..00b522890d77 100644 --- a/net/netfilter/nf_flow_table_offload.c +++ b/net/netfilter/nf_flow_table_offload.c @@ -1098,6 +1098,7 @@ static int nf_flow_table_block_setup(struct nf_flowtable *flowtable, struct flow_block_cb *block_cb, *next; int err = 0;
+ down_write(&flowtable->flow_block_lock); switch (cmd) { case FLOW_BLOCK_BIND: list_splice(&bo->cb_list, &flowtable->flow_block.cb_list); @@ -1112,6 +1113,7 @@ static int nf_flow_table_block_setup(struct nf_flowtable *flowtable, WARN_ON_ONCE(1); err = -EOPNOTSUPP; } + up_write(&flowtable->flow_block_lock);
return err; } @@ -1168,7 +1170,9 @@ static int nf_flow_table_offload_cmd(struct flow_block_offload *bo,
nf_flow_table_block_offload_init(bo, dev_net(dev), cmd, flowtable, extack); + down_write(&flowtable->flow_block_lock); err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_FT, bo); + up_write(&flowtable->flow_block_lock); if (err < 0) return err;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 42e370575c30..0a6f3c1e9ab7 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -5958,7 +5958,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, &timeout); if (err) return err; - } else if (set->flags & NFT_SET_TIMEOUT) { + } else if (set->flags & NFT_SET_TIMEOUT && + !(flags & NFT_SET_ELEM_INTERVAL_END)) { timeout = set->timeout; }
@@ -6024,7 +6025,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, err = -EOPNOTSUPP; goto err_set_elem_expr; } - } else if (set->num_exprs > 0) { + } else if (set->num_exprs > 0 && + !(flags & NFT_SET_ELEM_INTERVAL_END)) { err = nft_set_elem_expr_clone(ctx, set, expr_array); if (err < 0) goto err_set_elem_expr_clone; diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index a3f01f209a53..641dc21f92b4 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -98,7 +98,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr, return; #ifdef CONFIG_NF_CONNTRACK_MARK case NFT_CT_MARK: - *dest = ct->mark; + *dest = READ_ONCE(ct->mark); return; #endif #ifdef CONFIG_NF_CONNTRACK_SECMARK @@ -297,8 +297,8 @@ static void nft_ct_set_eval(const struct nft_expr *expr, switch (priv->key) { #ifdef CONFIG_NF_CONNTRACK_MARK case NFT_CT_MARK: - if (ct->mark != value) { - ct->mark = value; + if (READ_ONCE(ct->mark) != value) { + WRITE_ONCE(ct->mark, value); nf_conntrack_event_cache(IPCT_MARK, ct); } break; diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c index e5ebc0810675..ad3c033db64e 100644 --- a/net/netfilter/xt_connmark.c +++ b/net/netfilter/xt_connmark.c @@ -30,6 +30,7 @@ connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info) u_int32_t new_targetmark; struct nf_conn *ct; u_int32_t newmark; + u_int32_t oldmark;
ct = nf_ct_get(skb, &ctinfo); if (ct == NULL) @@ -37,14 +38,15 @@ connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info)
switch (info->mode) { case XT_CONNMARK_SET: - newmark = (ct->mark & ~info->ctmask) ^ info->ctmark; + oldmark = READ_ONCE(ct->mark); + newmark = (oldmark & ~info->ctmask) ^ info->ctmark; if (info->shift_dir == D_SHIFT_RIGHT) newmark >>= info->shift_bits; else newmark <<= info->shift_bits;
- if (ct->mark != newmark) { - ct->mark = newmark; + if (READ_ONCE(ct->mark) != newmark) { + WRITE_ONCE(ct->mark, newmark); nf_conntrack_event_cache(IPCT_MARK, ct); } break; @@ -55,15 +57,15 @@ connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info) else new_targetmark <<= info->shift_bits;
- newmark = (ct->mark & ~info->ctmask) ^ + newmark = (READ_ONCE(ct->mark) & ~info->ctmask) ^ new_targetmark; - if (ct->mark != newmark) { - ct->mark = newmark; + if (READ_ONCE(ct->mark) != newmark) { + WRITE_ONCE(ct->mark, newmark); nf_conntrack_event_cache(IPCT_MARK, ct); } break; case XT_CONNMARK_RESTORE: - new_targetmark = (ct->mark & info->ctmask); + new_targetmark = (READ_ONCE(ct->mark) & info->ctmask); if (info->shift_dir == D_SHIFT_RIGHT) new_targetmark >>= info->shift_bits; else @@ -126,7 +128,7 @@ connmark_mt(const struct sk_buff *skb, struct xt_action_param *par) if (ct == NULL) return false;
- return ((ct->mark & info->mask) == info->mark) ^ info->invert; + return ((READ_ONCE(ct->mark) & info->mask) == info->mark) ^ info->invert; }
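The conntrack mark conversions follow one pattern throughout: ct->mark can change concurrently without a lock, so each path loads it once with READ_ONCE(), computes on the local copy, and only publishes a changed value with WRITE_ONCE(). A hedged kernel-style sketch of that load-modify-conditionally-store shape; the nf_conn_like struct is made up and the helper is not from this patch:

/* Sketch only: assumes a kernel context where READ_ONCE/WRITE_ONCE exist. */
static void set_mark(struct nf_conn_like *ct, u32 value, u32 mask)
{
	u32 old, new;

	old = READ_ONCE(ct->mark);	   /* single, untorn load */
	new = (old & ~mask) ^ value;

	if (old != new) {
		WRITE_ONCE(ct->mark, new); /* single, untorn store */
		/* event/cache notification would go here, as in the patch */
	}
}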
static int connmark_mt_check(const struct xt_mtchk_param *par) diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index 6a193cce2a75..4ffdf2f45c44 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -542,7 +542,7 @@ static int nci_open_device(struct nci_dev *ndev) skb_queue_purge(&ndev->tx_q);
ndev->ops->close(ndev); - ndev->flags = 0; + ndev->flags &= BIT(NCI_UNREG); }
done: diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c index aa5e712adf07..3d36ea5701f0 100644 --- a/net/nfc/nci/data.c +++ b/net/nfc/nci/data.c @@ -279,8 +279,10 @@ void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb) nci_plen(skb->data));
conn_info = nci_get_conn_info_by_conn_id(ndev, nci_conn_id(skb->data)); - if (!conn_info) + if (!conn_info) { + kfree_skb(skb); return; + }
/* strip the nci data header */ skb_pull(skb, NCI_DATA_HDR_SIZE); diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 4e70df91d0f2..fc5b374fe568 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -152,7 +152,7 @@ static u8 ovs_ct_get_state(enum ip_conntrack_info ctinfo) static u32 ovs_ct_get_mark(const struct nf_conn *ct) { #if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) - return ct ? ct->mark : 0; + return ct ? READ_ONCE(ct->mark) : 0; #else return 0; #endif @@ -340,9 +340,9 @@ static int ovs_ct_set_mark(struct nf_conn *ct, struct sw_flow_key *key, #if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) u32 new_mark;
- new_mark = ct_mark | (ct->mark & ~(mask)); - if (ct->mark != new_mark) { - ct->mark = new_mark; + new_mark = ct_mark | (READ_ONCE(ct->mark) & ~(mask)); + if (READ_ONCE(ct->mark) != new_mark) { + WRITE_ONCE(ct->mark, new_mark); if (nf_ct_is_confirmed(ct)) nf_conntrack_event_cache(IPCT_MARK, ct); key->ct.mark = new_mark; diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 62c70709d798..e0123efa2a62 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -399,6 +399,7 @@ enum rxrpc_conn_proto_state { struct rxrpc_bundle { struct rxrpc_conn_parameters params; refcount_t ref; + atomic_t active; /* Number of active users */ unsigned int debug_id; bool try_upgrade; /* True if the bundle is attempting upgrade */ bool alloc_conn; /* True if someone's getting a conn */ diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c index 3c9eeb5b750c..bdb335cb2d05 100644 --- a/net/rxrpc/conn_client.c +++ b/net/rxrpc/conn_client.c @@ -40,6 +40,8 @@ __read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ; DEFINE_IDR(rxrpc_client_conn_ids); static DEFINE_SPINLOCK(rxrpc_conn_id_lock);
+static void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle); + /* * Get a connection ID and epoch for a client connection from the global pool. * The connection struct pointer is then recorded in the idr radix tree. The @@ -123,6 +125,7 @@ static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_conn_parameters *cp, bundle->params = *cp; rxrpc_get_peer(bundle->params.peer); refcount_set(&bundle->ref, 1); + atomic_set(&bundle->active, 1); spin_lock_init(&bundle->channel_lock); INIT_LIST_HEAD(&bundle->waiting_calls); } @@ -149,7 +152,7 @@ void rxrpc_put_bundle(struct rxrpc_bundle *bundle)
dead = __refcount_dec_and_test(&bundle->ref, &r);
- _debug("PUT B=%x %d", d, r); + _debug("PUT B=%x %d", d, r - 1); if (dead) rxrpc_free_bundle(bundle); } @@ -338,6 +341,7 @@ static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *c rxrpc_free_bundle(candidate); found_bundle: rxrpc_get_bundle(bundle); + atomic_inc(&bundle->active); spin_unlock(&local->client_bundles_lock); _leave(" = %u [found]", bundle->debug_id); return bundle; @@ -435,6 +439,7 @@ static void rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle, gfp_t gfp) if (old) trace_rxrpc_client(old, -1, rxrpc_client_replace); candidate->bundle_shift = shift; + atomic_inc(&bundle->active); bundle->conns[i] = candidate; for (j = 0; j < RXRPC_MAXCALLS; j++) set_bit(shift + j, &bundle->avail_chans); @@ -725,6 +730,7 @@ int rxrpc_connect_call(struct rxrpc_sock *rx, smp_rmb();
out_put_bundle: + rxrpc_deactivate_bundle(bundle); rxrpc_put_bundle(bundle); out: _leave(" = %d", ret); @@ -900,9 +906,8 @@ void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call static void rxrpc_unbundle_conn(struct rxrpc_connection *conn) { struct rxrpc_bundle *bundle = conn->bundle; - struct rxrpc_local *local = bundle->params.local; unsigned int bindex; - bool need_drop = false, need_put = false; + bool need_drop = false; int i;
_enter("C=%x", conn->debug_id); @@ -921,15 +926,22 @@ static void rxrpc_unbundle_conn(struct rxrpc_connection *conn) } spin_unlock(&bundle->channel_lock);
- /* If there are no more connections, remove the bundle */ - if (!bundle->avail_chans) { - _debug("maybe unbundle"); - spin_lock(&local->client_bundles_lock); + if (need_drop) { + rxrpc_deactivate_bundle(bundle); + rxrpc_put_connection(conn); + } +}
- for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) - if (bundle->conns[i]) - break; - if (i == ARRAY_SIZE(bundle->conns) && !bundle->params.exclusive) { +/* + * Drop the active count on a bundle. + */ +static void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle) +{ + struct rxrpc_local *local = bundle->params.local; + bool need_put = false; + + if (atomic_dec_and_lock(&bundle->active, &local->client_bundles_lock)) { + if (!bundle->params.exclusive) { _debug("erase bundle"); rb_erase(&bundle->local_node, &local->client_bundles); need_put = true; @@ -939,10 +951,6 @@ static void rxrpc_unbundle_conn(struct rxrpc_connection *conn) if (need_put) rxrpc_put_bundle(bundle); } - - if (need_drop) - rxrpc_put_connection(conn); - _leave(""); }
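The rxrpc hunks above split bundle lifetime across two counters: 'ref' keeps the structure's memory alive, while the new 'active' count tracks users that may still attach connections and decides when the bundle is unpublished from the local endpoint's lookup tree. Below is a minimal sketch of that shape using atomic_dec_and_lock(), so the tree lock is only taken when the active count really reaches zero; all names here (my_bundle, my_tree_lock, my_bundle_deactivate) are invented for illustration and are not rxrpc code.

/* Illustrative sketch only, not part of the patch above. */
#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_bundle {
	refcount_t	ref;	/* memory lifetime */
	atomic_t	active;	/* users that may still add connections */
};

static DEFINE_SPINLOCK(my_tree_lock);

static void my_bundle_deactivate(struct my_bundle *b)
{
	/* Takes my_tree_lock only if 'active' really dropped to zero,
	 * so unpublishing cannot race with a concurrent activation. */
	if (atomic_dec_and_lock(&b->active, &my_tree_lock)) {
		/* remove b from the lookup tree here */
		spin_unlock(&my_tree_lock);
		if (refcount_dec_and_test(&b->ref))
			kfree(b);
	}
}

atomic_dec_and_lock() returns with the lock held only on the final decrement, which is what lets the unpublish step and a later re-lookup be serialised by the same lock.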
 /*
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 1e8ab4749c6c..4662a6ce8a7e 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -976,7 +976,7 @@ config NET_ACT_TUNNEL_KEY
 config NET_ACT_CT
 	tristate "connection tracking tc action"
-	depends on NET_CLS_ACT && NF_CONNTRACK && NF_NAT && NF_FLOW_TABLE
+	depends on NET_CLS_ACT && NF_CONNTRACK && (!NF_NAT || NF_NAT) && NF_FLOW_TABLE
 	help
 	  Say Y here to allow sending the packets to conntrack module.
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 09e2aafc8943..0deb4e96a6c2 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -62,7 +62,7 @@ static int tcf_connmark_act(struct sk_buff *skb, const struct tc_action *a,
 	c = nf_ct_get(skb, &ctinfo);
 	if (c) {
-		skb->mark = c->mark;
+		skb->mark = READ_ONCE(c->mark);
 		/* using overlimits stats to count how many packets marked */
 		ca->tcf_qstats.overlimits++;
 		goto out;
@@ -82,7 +82,7 @@ static int tcf_connmark_act(struct sk_buff *skb, const struct tc_action *a,
 	c = nf_ct_tuplehash_to_ctrack(thash);
 	/* using overlimits stats to count how many packets marked */
 	ca->tcf_qstats.overlimits++;
-	skb->mark = c->mark;
+	skb->mark = READ_ONCE(c->mark);
 	nf_ct_put(c);
 out:
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 5950974ae8f6..a015915e5b72 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -178,7 +178,7 @@ static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
 	entry = tcf_ct_flow_table_flow_action_get_next(action);
 	entry->id = FLOW_ACTION_CT_METADATA;
 #if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
-	entry->ct_metadata.mark = ct->mark;
+	entry->ct_metadata.mark = READ_ONCE(ct->mark);
 #endif
 	ctinfo = dir == IP_CT_DIR_ORIGINAL ? IP_CT_ESTABLISHED : IP_CT_ESTABLISHED_REPLY;
@@ -940,9 +940,9 @@ static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask)
 	if (!mask)
 		return;
-	new_mark = mark | (ct->mark & ~(mask));
-	if (ct->mark != new_mark) {
-		ct->mark = new_mark;
+	new_mark = mark | (READ_ONCE(ct->mark) & ~(mask));
+	if (READ_ONCE(ct->mark) != new_mark) {
+		WRITE_ONCE(ct->mark, new_mark);
 		if (nf_ct_is_confirmed(ct))
 			nf_conntrack_event_cache(IPCT_MARK, ct);
 	}
diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c
index 0281e45987a4..65a20f3c9514 100644
--- a/net/sched/act_ctinfo.c
+++ b/net/sched/act_ctinfo.c
@@ -33,7 +33,7 @@ static void tcf_ctinfo_dscp_set(struct nf_conn *ct, struct tcf_ctinfo *ca,
 {
 	u8 dscp, newdscp;
-	newdscp = (((ct->mark & cp->dscpmask) >> cp->dscpmaskshift) << 2) &
+	newdscp = (((READ_ONCE(ct->mark) & cp->dscpmask) >> cp->dscpmaskshift) << 2) &
 		     ~INET_ECN_MASK;
 	switch (proto) {
@@ -73,7 +73,7 @@ static void tcf_ctinfo_cpmark_set(struct nf_conn *ct, struct tcf_ctinfo *ca,
 			      struct sk_buff *skb)
 {
 	ca->stats_cpmark_set++;
-	skb->mark = ct->mark & cp->cpmarkmask;
+	skb->mark = READ_ONCE(ct->mark) & cp->cpmarkmask;
 }
 static int tcf_ctinfo_act(struct sk_buff *skb, const struct tc_action *a,
@@ -131,7 +131,7 @@ static int tcf_ctinfo_act(struct sk_buff *skb, const struct tc_action *a,
 	}
 	if (cp->mode & CTINFO_MODE_DSCP)
-		if (!cp->dscpstatemask || (ct->mark & cp->dscpstatemask))
+		if (!cp->dscpstatemask || (READ_ONCE(ct->mark) & cp->dscpstatemask))
 			tcf_ctinfo_dscp_set(ct, ca, cp, skb, wlen, proto);
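All of the ct->mark hunks in this series make the same change: the mark can be read and written locklessly from several contexts, so plain accesses are replaced with READ_ONCE()/WRITE_ONCE() to get single, untorn loads and stores that the compiler may not split or re-read. A minimal sketch of the pattern with invented names (struct tracked, set_mark_masked), not code from this release:

/* Illustrative sketch only, not from the patch above. */
#include <linux/compiler.h>
#include <linux/types.h>

struct tracked {
	u32 mark;	/* updated and read locklessly from several contexts */
};

static void set_mark_masked(struct tracked *t, u32 value, u32 mask)
{
	/* Read the current mark exactly once ... */
	u32 old_mark = READ_ONCE(t->mark);
	u32 new_mark = value | (old_mark & ~mask);

	/* ... and only publish a full, untorn store when it changed. */
	if (old_mark != new_mark)
		WRITE_ONCE(t->mark, new_mark);
}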
 	if (cp->mode & CTINFO_MODE_CPMARK)
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index e8630707901e..e8dcdf267c0c 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -211,7 +211,10 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
 	u32 self;
 	int err;
-	skb_linearize(skb);
+	if (skb_linearize(skb)) {
+		kfree_skb(skb);
+		return;
+	}
 	hdr = buf_msg(skb);
 	if (caps & TIPC_NODE_ID128)
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index d92ec92f0b71..e3b427a70398 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -176,7 +176,7 @@ static void tipc_conn_close(struct tipc_conn *con)
 	conn_put(con);
 }
-static struct tipc_conn *tipc_conn_alloc(struct tipc_topsrv *s)
+static struct tipc_conn *tipc_conn_alloc(struct tipc_topsrv *s, struct socket *sock)
 {
 	struct tipc_conn *con;
 	int ret;
@@ -202,10 +202,12 @@ static struct tipc_conn *tipc_conn_alloc(struct tipc_topsrv *s)
 	}
 	con->conid = ret;
 	s->idr_in_use++;
-	spin_unlock_bh(&s->idr_lock);
 	set_bit(CF_CONNECTED, &con->flags);
 	con->server = s;
+	con->sock = sock;
+	conn_get(con);
+	spin_unlock_bh(&s->idr_lock);
 	return con;
 }
@@ -467,7 +469,7 @@ static void tipc_topsrv_accept(struct work_struct *work)
 		ret = kernel_accept(lsock, &newsock, O_NONBLOCK);
 		if (ret < 0)
 			return;
-		con = tipc_conn_alloc(srv);
+		con = tipc_conn_alloc(srv, newsock);
 		if (IS_ERR(con)) {
 			ret = PTR_ERR(con);
 			sock_release(newsock);
@@ -479,11 +481,11 @@ static void tipc_topsrv_accept(struct work_struct *work)
 		newsk->sk_data_ready = tipc_conn_data_ready;
 		newsk->sk_write_space = tipc_conn_write_space;
 		newsk->sk_user_data = con;
-		con->sock = newsock;
 		write_unlock_bh(&newsk->sk_callback_lock);
 		/* Wake up receive process in case of 'SYN+' message */
 		newsk->sk_data_ready(newsk);
+		conn_put(con);
 	}
 }
@@ -577,17 +579,17 @@ bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower,
 	sub.filter = filter;
 	*(u64 *)&sub.usr_handle = (u64)port;
-	con = tipc_conn_alloc(tipc_topsrv(net));
+	con = tipc_conn_alloc(tipc_topsrv(net), NULL);
 	if (IS_ERR(con))
 		return false;
 	*conid = con->conid;
-	con->sock = NULL;
 	rc = tipc_conn_rcv_sub(tipc_topsrv(net), con, &sub);
-	if (rc >= 0)
-		return true;
+	if (rc)
+		conn_put(con);
+
 	conn_put(con);
-	return false;
+	return !rc;
 }
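The tipc_topsrv changes above make tipc_conn_alloc() finish initialising the connection (including con->sock) and take an extra reference before the object becomes reachable through the IDR and sk_user_data, with that temporary reference dropped once the accept path is done. A hedged sketch of the publish-with-a-reference idiom, with invented names (this struct conn and conn_put are not the tipc symbols):

/* Illustrative sketch only, invented names, not tipc code. */
#include <linux/refcount.h>
#include <linux/slab.h>

struct conn {
	refcount_t ref;
	void *sock;		/* stands in for state that must be set early */
};

static struct conn *conn_create(void *sock)
{
	struct conn *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;
	refcount_set(&c->ref, 1);	/* reference owned by the creator */
	c->sock = sock;			/* fully initialise first ...      */
	refcount_inc(&c->ref);		/* ... then pin it for the caller  */
	return c;			/* only now publish it elsewhere   */
}

static void conn_put(struct conn *c)
{
	if (refcount_dec_and_test(&c->ref))
		kfree(c);
}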
 void tipc_topsrv_kern_unsubscr(struct net *net, int conid)
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 775836f6785a..450d609b512a 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1555,10 +1555,12 @@ static u32 cfg80211_calculate_bitrate_eht(struct rate_info *rate)
 	tmp = result;
 	tmp *= SCALE;
 	do_div(tmp, mcs_divisors[rate->mcs]);
-	result = tmp;
 	/* and take NSS */
-	result = (result * rate->nss) / 8;
+	tmp *= rate->nss;
+	do_div(tmp, 8);
+
+	result = tmp;
 	return result / 10000;
 }
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 637ca8838436..9af6bf1652e4 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -97,6 +97,18 @@ static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
 	}
 }
+static inline bool xmit_xfrm_check_overflow(struct sk_buff *skb)
+{
+	struct xfrm_offload *xo = xfrm_offload(skb);
+	__u32 seq = xo->seq.low;
+
+	seq += skb_shinfo(skb)->gso_segs;
+	if (unlikely(seq < xo->seq.low))
+		return true;
+
+	return false;
+}
+
 struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
 {
 	int err;
@@ -134,7 +146,8 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
 		return skb;
 	}
-	if (skb_is_gso(skb) && unlikely(x->xso.dev != dev)) {
+	if (skb_is_gso(skb) && (unlikely(x->xso.dev != dev) ||
+				unlikely(xmit_xfrm_check_overflow(skb)))) {
 		struct sk_buff *segs;
 		/* Packet got rerouted, fixup features and segment it. */
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 9277d81b344c..49dd788859d8 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -714,7 +714,7 @@ static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff
 		oseq += skb_shinfo(skb)->gso_segs;
 	}
-	if (unlikely(oseq < replay_esn->oseq)) {
+	if (unlikely(xo->seq.low < replay_esn->oseq)) {
 		XFRM_SKB_CB(skb)->seq.output.hi = ++oseq_hi;
 		xo->seq.hi = oseq_hi;
 		replay_esn->oseq_hi = oseq_hi;
diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
index b9eb3208f288..ae31bb127594 100644
--- a/sound/hda/intel-dsp-config.c
+++ b/sound/hda/intel-dsp-config.c
@@ -320,6 +320,11 @@ static const struct config_entry config_table[] = {
 			{}
 		}
 	},
+	{
+		.flags = FLAG_SOF,
+		.device = 0x34c8,
+		.codec_hid = &essx_83x6,
+	},
 	{
 		.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
 		.device = 0x34c8,
diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
index 6c0f1de10429..d9715bea965e 100644
--- a/sound/soc/amd/yc/acp6x-mach.c
+++ b/sound/soc/amd/yc/acp6x-mach.c
@@ -206,6 +206,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "UM5302TA"),
 		}
 	},
+	{
+		.driver_data = &acp6x_card,
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Alienware"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m17 R5 AMD"),
+		}
+	},
 	{}
 };
diff --git a/sound/soc/codecs/hdac_hda.h b/sound/soc/codecs/hdac_hda.h
index d0efc5e254ae..da0ed74758b0 100644
--- a/sound/soc/codecs/hdac_hda.h
+++ b/sound/soc/codecs/hdac_hda.h
@@ -14,7 +14,7 @@ enum {
 	HDAC_HDMI_1_DAI_ID,
 	HDAC_HDMI_2_DAI_ID,
 	HDAC_HDMI_3_DAI_ID,
-	HDAC_LAST_DAI_ID = HDAC_HDMI_3_DAI_ID,
+	HDAC_DAI_ID_NUM
 };
 struct hdac_hda_pcm {
@@ -24,7 +24,7 @@ struct hdac_hda_pcm {
 struct hdac_hda_priv {
 	struct hda_codec codec;
-	struct hdac_hda_pcm pcm[HDAC_LAST_DAI_ID];
+	struct hdac_hda_pcm pcm[HDAC_DAI_ID_NUM];
 	bool need_display_power;
 };
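The hdac_hda fix relies on a standard C idiom: with no explicit values assigned, a final "count" enumerator equals the number of IDs before it, whereas the old HDAC_LAST_DAI_ID sentinel aliased the last ID and therefore sized the pcm[] array one element short. A tiny standalone illustration of the idiom (names invented, not the driver's code):

/* Illustrative sketch only. */
enum {
	DAI_A,		/* 0 */
	DAI_B,		/* 1 */
	DAI_C,		/* 2 */
	DAI_ID_NUM	/* 3 == number of real IDs above */
};

static int stats[DAI_ID_NUM];	/* one slot per DAI, none missing */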
diff --git a/sound/soc/codecs/max98373-i2c.c b/sound/soc/codecs/max98373-i2c.c
index 3e04c7f0cce4..ec0905df65d1 100644
--- a/sound/soc/codecs/max98373-i2c.c
+++ b/sound/soc/codecs/max98373-i2c.c
@@ -549,6 +549,10 @@ static int max98373_i2c_probe(struct i2c_client *i2c)
 	max98373->cache = devm_kcalloc(&i2c->dev, max98373->cache_num,
 				       sizeof(*max98373->cache), GFP_KERNEL);
+	if (!max98373->cache) {
+		ret = -ENOMEM;
+		return ret;
+	}
 	for (i = 0; i < max98373->cache_num; i++)
 		max98373->cache[i].reg = max98373_i2c_cache_reg[i];
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 3fafd9fc5cfd..75a45ad55aa8 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -1794,6 +1794,7 @@ static int sgtl5000_i2c_remove(struct i2c_client *client)
 {
 	struct sgtl5000_priv *sgtl5000 = i2c_get_clientdata(client);
+	regmap_write(sgtl5000->regmap, SGTL5000_CHIP_CLK_CTRL, SGTL5000_CHIP_CLK_CTRL_DEFAULT);
 	regmap_write(sgtl5000->regmap, SGTL5000_CHIP_DIG_POWER, SGTL5000_DIG_POWER_DEFAULT);
 	regmap_write(sgtl5000->regmap, SGTL5000_CHIP_ANA_POWER, SGTL5000_ANA_POWER_DEFAULT);
diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c
index 6432b83f616f..a935c5fd9edb 100644
--- a/sound/soc/intel/boards/bytcht_es8316.c
+++ b/sound/soc/intel/boards/bytcht_es8316.c
@@ -443,6 +443,13 @@ static const struct dmi_system_id byt_cht_es8316_quirk_table[] = {
 					| BYT_CHT_ES8316_INTMIC_IN2_MAP
 					| BYT_CHT_ES8316_JD_INVERTED),
 	},
+	{	/* Nanote UMPC-01 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "RWC CO.,LTD"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "UMPC-01"),
+		},
+		.driver_data = (void *)BYT_CHT_ES8316_INTMIC_IN1_MAP,
+	},
 	{	/* Teclast X98 Plus II */
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
diff --git a/sound/soc/intel/boards/sof_es8336.c b/sound/soc/intel/boards/sof_es8336.c
index 606cc3242a60..5c218a39ca20 100644
--- a/sound/soc/intel/boards/sof_es8336.c
+++ b/sound/soc/intel/boards/sof_es8336.c
@@ -63,6 +63,7 @@ struct sof_es8336_private {
 	struct snd_soc_jack jack;
 	struct list_head hdmi_pcm_list;
 	bool speaker_en;
+	struct delayed_work pcm_pop_work;
 };
 struct sof_hdmi_pcm {
@@ -111,6 +112,46 @@ static void log_quirks(struct device *dev)
 		dev_info(dev, "quirk headset at mic1 port enabled\n");
 }
+static void pcm_pop_work_events(struct work_struct *work)
+{
+	struct sof_es8336_private *priv =
+		container_of(work, struct sof_es8336_private, pcm_pop_work.work);
+
+	gpiod_set_value_cansleep(priv->gpio_speakers, priv->speaker_en);
+
+	if (quirk & SOF_ES8336_HEADPHONE_GPIO)
+		gpiod_set_value_cansleep(priv->gpio_headphone, priv->speaker_en);
+
+}
+
+static int sof_8336_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+	struct snd_soc_card *card = rtd->card;
+	struct sof_es8336_private *priv = snd_soc_card_get_drvdata(card);
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		break;
+
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_STOP:
+		if (priv->speaker_en == false)
+			if (substream->stream == 0) {
+				cancel_delayed_work(&priv->pcm_pop_work);
+				gpiod_set_value_cansleep(priv->gpio_speakers, true);
+			}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int sof_es8316_speaker_power_event(struct snd_soc_dapm_widget *w,
 					  struct snd_kcontrol *kcontrol, int event)
 {
@@ -122,19 +163,7 @@ static int sof_es8316_speaker_power_event(struct snd_soc_dapm_widget *w,
 	priv->speaker_en = !SND_SOC_DAPM_EVENT_ON(event);
-	if (SND_SOC_DAPM_EVENT_ON(event))
-		msleep(70);
-
-	gpiod_set_value_cansleep(priv->gpio_speakers, priv->speaker_en);
-
-	if (!(quirk & SOF_ES8336_HEADPHONE_GPIO))
-		return 0;
-
-	if (SND_SOC_DAPM_EVENT_ON(event))
-		msleep(70);
-
-	gpiod_set_value_cansleep(priv->gpio_headphone, priv->speaker_en);
-
+	queue_delayed_work(system_wq, &priv->pcm_pop_work, msecs_to_jiffies(70));
 	return 0;
 }
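The sof_es8336 change moves the speaker/headphone GPIO flips into a delayed work item queued 70 ms out and cancels it when the stream stops, so rapid stop/start cycles collapse into a single GPIO update instead of an audible pop. A reduced sketch of that debounce-with-delayed-work pattern, with invented names (pop_ctl and its helpers are not the driver's symbols):

/* Illustrative sketch only, not the driver code above. */
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct pop_ctl {
	struct delayed_work work;
	bool enable;
};

static void pop_work_fn(struct work_struct *work)
{
	struct pop_ctl *pc = container_of(work, struct pop_ctl, work.work);

	/* apply the last requested state to the hardware here */
	pr_debug("applying speaker state %d\n", pc->enable);
}

static void pop_ctl_init(struct pop_ctl *pc)
{
	INIT_DELAYED_WORK(&pc->work, pop_work_fn);
}

static void pop_ctl_request(struct pop_ctl *pc, bool enable)
{
	pc->enable = enable;
	/* coalesce requests: anything within 70 ms runs as one update */
	queue_delayed_work(system_wq, &pc->work, msecs_to_jiffies(70));
}

static void pop_ctl_stop(struct pop_ctl *pc)
{
	cancel_delayed_work_sync(&pc->work);	/* no pending flip survives */
}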
@@ -344,6 +373,7 @@ static int sof_es8336_hw_params(struct snd_pcm_substream *substream,
 /* machine stream operations */
 static struct snd_soc_ops sof_es8336_ops = {
 	.hw_params = sof_es8336_hw_params,
+	.trigger = sof_8336_trigger,
 };
 static struct snd_soc_dai_link_component platform_component[] = {
@@ -722,7 +752,8 @@ static int sof_es8336_probe(struct platform_device *pdev)
 	}
 	INIT_LIST_HEAD(&priv->hdmi_pcm_list);
-
+	INIT_DELAYED_WORK(&priv->pcm_pop_work,
+			  pcm_pop_work_events);
 	snd_soc_card_set_drvdata(card, priv);
 	if (mach->mach_params.dmic_num > 0) {
@@ -751,6 +782,7 @@ static int sof_es8336_remove(struct platform_device *pdev)
 	struct snd_soc_card *card = platform_get_drvdata(pdev);
 	struct sof_es8336_private *priv = snd_soc_card_get_drvdata(card);
+	cancel_delayed_work(&priv->pcm_pop_work);
 	gpiod_put(priv->gpio_speakers);
 	device_remove_software_node(priv->codec_dev);
 	put_device(priv->codec_dev);
diff --git a/sound/soc/intel/common/soc-acpi-intel-icl-match.c b/sound/soc/intel/common/soc-acpi-intel-icl-match.c
index b032bc07de8b..d0062f2cd256 100644
--- a/sound/soc/intel/common/soc-acpi-intel-icl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-icl-match.c
@@ -10,6 +10,11 @@
 #include <sound/soc-acpi-intel-match.h>
 #include "../skylake/skl.h"
+static const struct snd_soc_acpi_codecs essx_83x6 = {
+	.num_codecs = 3,
+	.codecs = { "ESSX8316", "ESSX8326", "ESSX8336"},
+};
+
 static struct skl_machine_pdata icl_pdata = {
 	.use_tplg_pcm = true,
 };
@@ -27,6 +32,14 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_icl_machines[] = {
 		.drv_name = "sof_rt5682",
 		.sof_tplg_filename = "sof-icl-rt5682.tplg",
 	},
+	{
+		.comp_ids = &essx_83x6,
+		.drv_name = "sof-essx8336",
+		.sof_tplg_filename = "sof-icl-es8336", /* the tplg suffix is added at run time */
+		.tplg_quirk_mask = SND_SOC_ACPI_TPLG_INTEL_SSP_NUMBER |
+				   SND_SOC_ACPI_TPLG_INTEL_SSP_MSB |
+				   SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER,
+	},
 	{},
 };
 EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_icl_machines);
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 4d9b91e7e14f..f6a996f0f9c7 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -800,11 +800,6 @@ static int __soc_pcm_open(struct snd_soc_pcm_runtime *rtd,
 		ret = snd_soc_dai_startup(dai, substream);
 		if (ret < 0)
 			goto err;
-
-		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-			dai->tx_mask = 0;
-		else
-			dai->rx_mask = 0;
 	}
 	/* Dynamic PCM DAI links compat checks use dynamic capabilities */
diff --git a/sound/soc/sof/ipc3-topology.c b/sound/soc/sof/ipc3-topology.c
index a39b43850f0e..bf8a46463cec 100644
--- a/sound/soc/sof/ipc3-topology.c
+++ b/sound/soc/sof/ipc3-topology.c
@@ -2242,6 +2242,7 @@ static int sof_ipc3_tear_down_all_pipelines(struct snd_sof_dev *sdev, bool verif
 	struct sof_ipc_fw_version *v = &sdev->fw_ready.version;
 	struct snd_sof_widget *swidget;
 	struct snd_sof_route *sroute;
+	bool dyn_widgets = false;
 	int ret;
 	/*
@@ -2251,12 +2252,14 @@ static int sof_ipc3_tear_down_all_pipelines(struct snd_sof_dev *sdev, bool verif
 	 * topology loading the sound card unavailable to open PCMs.
 	 */
 	list_for_each_entry(swidget, &sdev->widget_list, list) {
-		if (swidget->dynamic_pipeline_widget)
+		if (swidget->dynamic_pipeline_widget) {
+			dyn_widgets = true;
 			continue;
+		}
-		/* Do not free widgets for static pipelines with FW ABI older than 3.19 */
+		/* Do not free widgets for static pipelines with FW older than SOF2.2 */
 		if (!verify && !swidget->dynamic_pipeline_widget &&
-		    v->abi_version < SOF_ABI_VER(3, 19, 0)) {
+		    SOF_FW_VER(v->major, v->minor, v->micro) < SOF_FW_VER(2, 2, 0)) {
 			swidget->use_count = 0;
 			swidget->complete = 0;
 			continue;
@@ -2270,9 +2273,11 @@ static int sof_ipc3_tear_down_all_pipelines(struct snd_sof_dev *sdev, bool verif
 	/*
 	 * Tear down all pipelines associated with PCMs that did not get suspended
 	 * and unset the prepare flag so that they can be set up again during resume.
-	 * Skip this step for older firmware.
+	 * Skip this step for older firmware unless topology has any
+	 * dynamic pipeline (in which case the step is mandatory).
 	 */
-	if (!verify && v->abi_version >= SOF_ABI_VER(3, 19, 0)) {
+	if (!verify && (dyn_widgets || SOF_FW_VER(v->major, v->minor, v->micro) >=
+	    SOF_FW_VER(2, 2, 0))) {
 		ret = sof_tear_down_left_over_pipelines(sdev);
 		if (ret < 0) {
 			dev_err(sdev->dev, "failed to tear down paused pipelines\n");
diff --git a/sound/soc/stm/stm32_adfsdm.c b/sound/soc/stm/stm32_adfsdm.c
index 643fc8a17018..837c1848d9bf 100644
--- a/sound/soc/stm/stm32_adfsdm.c
+++ b/sound/soc/stm/stm32_adfsdm.c
@@ -304,6 +304,11 @@ static int stm32_adfsdm_dummy_cb(const void *data, void *private)
 	return 0;
 }
+static void stm32_adfsdm_cleanup(void *data)
+{
+	iio_channel_release_all_cb(data);
+}
+
 static struct snd_soc_component_driver stm32_adfsdm_soc_platform = {
 	.open = stm32_adfsdm_pcm_open,
 	.close = stm32_adfsdm_pcm_close,
@@ -350,6 +355,12 @@ static int stm32_adfsdm_probe(struct platform_device *pdev)
 	if (IS_ERR(priv->iio_cb))
 		return PTR_ERR(priv->iio_cb);
+	ret = devm_add_action_or_reset(&pdev->dev, stm32_adfsdm_cleanup, priv->iio_cb);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Unable to add action\n");
+		return ret;
+	}
+
 	component = devm_kzalloc(&pdev->dev, sizeof(*component), GFP_KERNEL);
 	if (!component)
 		return -ENOMEM;
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 2420dc994632..4c9ea13f72d4 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -923,7 +923,8 @@ void snd_usb_endpoint_close(struct snd_usb_audio *chip,
 	usb_audio_dbg(chip, "Closing EP 0x%x (count %d)\n",
 		      ep->ep_num, ep->opened);
-	if (!--ep->iface_ref->opened)
+	if (!--ep->iface_ref->opened &&
+	    !(chip->quirk_flags & QUIRK_FLAG_IFACE_SKIP_CLOSE))
 		endpoint_set_interface(chip, ep, false);
 	if (!--ep->opened) {
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 250bda7cda07..4f914dce6bbf 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -2186,6 +2186,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
 		   QUIRK_FLAG_GENERIC_IMPLICIT_FB),
 	DEVICE_FLG(0x2b53, 0x0031, /* Fiero SC-01 (firmware v1.1.0) */
 		   QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+	DEVICE_FLG(0x0525, 0xa4ad, /* Hamedal C20 usb camero */
+		   QUIRK_FLAG_IFACE_SKIP_CLOSE),
 	/* Vendor matches */
 	VENDOR_FLG(0x045e, /* MS Lifecam */
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index 2c6575029b1c..e97141ef730a 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -170,6 +170,8 @@ extern bool snd_usb_skip_validation;
  *  Apply the generic implicit feedback sync mode (same as implicit_fb=1 option)
  * QUIRK_FLAG_SKIP_IMPLICIT_FB
  *  Don't apply implicit feedback sync mode
+ * QUIRK_FLAG_IFACE_SKIP_CLOSE
+ *  Don't closed interface during setting sample rate
  */
 #define QUIRK_FLAG_GET_SAMPLE_RATE	(1U << 0)
@@ -191,5 +193,6 @@ extern bool snd_usb_skip_validation;
 #define QUIRK_FLAG_SET_IFACE_FIRST	(1U << 16)
 #define QUIRK_FLAG_GENERIC_IMPLICIT_FB	(1U << 17)
 #define QUIRK_FLAG_SKIP_IMPLICIT_FB	(1U << 18)
+#define QUIRK_FLAG_IFACE_SKIP_CLOSE	(1U << 19)
 #endif /* __USBAUDIO_H */
diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
index 2491c54a5e4f..f8deae4e26a1 100644
--- a/tools/iio/iio_generic_buffer.c
+++ b/tools/iio/iio_generic_buffer.c
@@ -715,12 +715,12 @@ int main(int argc, char **argv)
 				continue;
 			}
-			toread = buf_len;
 		} else {
 			usleep(timedelay);
-			toread = 64;
 		}
+		toread = buf_len;
+
 		read_size = read(buf_fd, data, toread * scan_size);
 		if (read_size < 0) {
 			if (errno == EAGAIN) {
diff --git a/tools/testing/selftests/bpf/verifier/ref_tracking.c b/tools/testing/selftests/bpf/verifier/ref_tracking.c
index 57a83d763ec1..6dc65b2501ed 100644
--- a/tools/testing/selftests/bpf/verifier/ref_tracking.c
+++ b/tools/testing/selftests/bpf/verifier/ref_tracking.c
@@ -905,3 +905,39 @@
 	.result_unpriv = REJECT,
 	.errstr_unpriv = "unknown func",
 },
+{
+	"reference tracking: try to leak released ptr reg",
+	.insns = {
+		BPF_MOV64_IMM(BPF_REG_0, 0),
+		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4),
+		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+		BPF_LD_MAP_FD(BPF_REG_1, 0),
+		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+		BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+		BPF_EXIT_INSN(),
+		BPF_MOV64_REG(BPF_REG_9, BPF_REG_0),
+
+		BPF_MOV64_IMM(BPF_REG_0, 0),
+		BPF_LD_MAP_FD(BPF_REG_1, 0),
+		BPF_MOV64_IMM(BPF_REG_2, 8),
+		BPF_MOV64_IMM(BPF_REG_3, 0),
+		BPF_EMIT_CALL(BPF_FUNC_ringbuf_reserve),
+		BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+		BPF_EXIT_INSN(),
+		BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+
+		BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+		BPF_MOV64_IMM(BPF_REG_2, 0),
+		BPF_EMIT_CALL(BPF_FUNC_ringbuf_discard),
+		BPF_MOV64_IMM(BPF_REG_0, 0),
+
+		BPF_STX_MEM(BPF_DW, BPF_REG_9, BPF_REG_8, 0),
+		BPF_EXIT_INSN()
+	},
+	.fixup_map_array_48b = { 4 },
+	.fixup_map_ringbuf = { 11 },
+	.result = ACCEPT,
+	.result_unpriv = REJECT,
+	.errstr_unpriv = "R8 !read_ok"
+},
diff --git a/tools/testing/selftests/net/io_uring_zerocopy_tx.sh b/tools/testing/selftests/net/io_uring_zerocopy_tx.sh
index 32aa6e9dacc2..9ac4456d48fc 100755
--- a/tools/testing/selftests/net/io_uring_zerocopy_tx.sh
+++ b/tools/testing/selftests/net/io_uring_zerocopy_tx.sh
@@ -29,7 +29,7 @@ if [[ "$#" -eq "0" ]]; then
 	for IP in "${IPs[@]}"; do
 		for mode in $(seq 1 3); do
 			$0 "$IP" udp -m "$mode" -t 1 -n 32
-			$0 "$IP" tcp -m "$mode" -t 1 -n 32
+			$0 "$IP" tcp -m "$mode" -t 1 -n 1
 		done
 	done
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index ff83ef426df5..e52b79440123 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -2105,7 +2105,7 @@ remove_tests()
 		pm_nl_set_limits $ns2 1 3
 		pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
 		pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow
-		run_tests $ns1 $ns2 10.0.1.1 0 -1 -2 slow
+		run_tests $ns1 $ns2 10.0.1.1 0 -1 -2 speed_10
 		chk_join_nr 3 3 3
 		chk_add_nr 1 1
 		chk_rm_nr 2 2
@@ -2118,7 +2118,7 @@ remove_tests()
 		pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
 		pm_nl_add_endpoint $ns1 10.0.4.1 flags signal
 		pm_nl_set_limits $ns2 3 3
-		run_tests $ns1 $ns2 10.0.1.1 0 -3 0 slow
+		run_tests $ns1 $ns2 10.0.1.1 0 -3 0 speed_10
 		chk_join_nr 3 3 3
 		chk_add_nr 3 3
 		chk_rm_nr 3 3 invert
@@ -2131,7 +2131,7 @@ remove_tests()
 		pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
 		pm_nl_add_endpoint $ns1 10.0.14.1 flags signal
 		pm_nl_set_limits $ns2 3 3
-		run_tests $ns1 $ns2 10.0.1.1 0 -3 0 slow
+		run_tests $ns1 $ns2 10.0.1.1 0 -3 0 speed_10
 		chk_join_nr 1 1 1
 		chk_add_nr 3 3
 		chk_rm_nr 3 1 invert
diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
index 0879da915014..80d36f7cfee8 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
@@ -35,8 +35,9 @@ init()
 	ns1="ns1-$rndh"
 	ns2="ns2-$rndh"
+	ns_sbox="ns_sbox-$rndh"
-	for netns in "$ns1" "$ns2";do
+	for netns in "$ns1" "$ns2" "$ns_sbox";do
 		ip netns add $netns || exit $ksft_skip
 		ip -net $netns link set lo up
 		ip netns exec $netns sysctl -q net.mptcp.enabled=1
@@ -73,7 +74,7 @@ init()
 cleanup()
 {
-	for netns in "$ns1" "$ns2"; do
+	for netns in "$ns1" "$ns2" "$ns_sbox"; do
 		ip netns del $netns
 	done
 	rm -f "$cin" "$cout"
@@ -243,7 +244,7 @@ do_mptcp_sockopt_tests()
 {
 	local lret=0
-	./mptcp_sockopt
+	ip netns exec "$ns_sbox" ./mptcp_sockopt
 	lret=$?
 	if [ $lret -ne 0 ]; then
@@ -252,7 +253,7 @@ do_mptcp_sockopt_tests()
 		return
 	fi
-	./mptcp_sockopt -6
+	ip netns exec "$ns_sbox" ./mptcp_sockopt -6
 	lret=$?
 	if [ $lret -ne 0 ]; then
diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
index ffa13a957a36..40aeb5a71a2a 100755
--- a/tools/testing/selftests/net/mptcp/simult_flows.sh
+++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
@@ -247,9 +247,10 @@ run_test()
 	tc -n $ns2 qdisc add dev ns2eth1 root netem rate ${rate1}mbit $delay1
 	tc -n $ns2 qdisc add dev ns2eth2 root netem rate ${rate2}mbit $delay2
-	# time is measured in ms, account for transfer size, affegated link speed
+	# time is measured in ms, account for transfer size, aggregated link speed
 	# and header overhead (10%)
-	local time=$((size * 8 * 1000 * 10 / (( $rate1 + $rate2) * 1024 *1024 * 9) ))
+	#              ms    byte -> bit   10%        mbit      -> kbit -> bit  10%
+	local time=$((1000 * size * 8 * 10 / ((rate1 + rate2) * 1000 * 1000 * 9) ))
 	# mptcp_connect will do some sleeps to allow the mp_join handshake
 	# completion (see mptcp_connect): 200ms on each side, add some slack
diff --git a/tools/testing/selftests/net/udpgro.sh b/tools/testing/selftests/net/udpgro.sh
index ebbd0b282432..6a443ca3cd3a 100755
--- a/tools/testing/selftests/net/udpgro.sh
+++ b/tools/testing/selftests/net/udpgro.sh
@@ -50,7 +50,7 @@ run_one() {
 		echo "failed" &
 	# Hack: let bg programs complete the startup
-	sleep 0.1
+	sleep 0.2
 	./udpgso_bench_tx ${tx_args}
 	ret=$?
 	wait $(jobs -p)
@@ -117,7 +117,7 @@ run_one_2sock() {
 		echo "failed" &
 	# Hack: let bg programs complete the startup
-	sleep 0.1
+	sleep 0.2
 	./udpgso_bench_tx ${tx_args} -p 12345
 	sleep 0.1 # first UDP GSO socket should be closed at this point
diff --git a/tools/testing/selftests/net/udpgro_bench.sh b/tools/testing/selftests/net/udpgro_bench.sh
index fad2d1a71cac..8a1109a545db 100755
--- a/tools/testing/selftests/net/udpgro_bench.sh
+++ b/tools/testing/selftests/net/udpgro_bench.sh
@@ -39,7 +39,7 @@ run_one() {
 	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -t ${rx_args} -r &
 	# Hack: let bg programs complete the startup
-	sleep 0.1
+	sleep 0.2
 	./udpgso_bench_tx ${tx_args}
 }
diff --git a/tools/testing/selftests/net/udpgro_frglist.sh b/tools/testing/selftests/net/udpgro_frglist.sh
index 832c738cc3c2..7fe85ba51075 100755
--- a/tools/testing/selftests/net/udpgro_frglist.sh
+++ b/tools/testing/selftests/net/udpgro_frglist.sh
@@ -44,7 +44,7 @@ run_one() {
 	ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} -r &
 	# Hack: let bg programs complete the startup
-	sleep 0.1
+	sleep 0.2
 	./udpgso_bench_tx ${tx_args}
 }
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index 346e47f15572..7c248193ca26 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -297,7 +297,12 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 	if (!gpc->valid || old_uhva != gpc->uhva) {
 		ret = hva_to_pfn_retry(kvm, gpc);
 	} else {
-		/* If the HVA→PFN mapping was already valid, don't unmap it. */
+		/*
+		 * If the HVA→PFN mapping was already valid, don't unmap it.
+		 * But do update gpc->khva because the offset within the page
+		 * may have changed.
+		 */
+		gpc->khva = old_khva + page_offset;
 		old_pfn = KVM_PFN_ERR_FAULT;
 		old_khva = NULL;
 		ret = 0;