I'm announcing the release of the 6.6.103 kernel.
All users of the 6.6 kernel series must upgrade.
The updated 6.6.y git tree can be found at:
        git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git linux-6.6.y
and can be browsed at the normal kernel.org git web browser:
        https://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git;a=summa...
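For those tracking the stable series in an existing kernel clone, picking up this release typically looks roughly like the following (the remote name "stable" here is only an example):

        git remote add stable git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git
        git fetch stable
        git verify-tag v6.6.103    # optional; needs the maintainer's public key in your keyring
        git checkout -b linux-6.6.y stable/linux-6.6.y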
thanks,
greg k-h
------------
Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dpu.yaml | 2 Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dsi-host.yaml | 2 Documentation/filesystems/fscrypt.rst | 37 - Documentation/firmware-guide/acpi/i2c-muxes.rst | 8 Documentation/networking/bonding.rst | 12 Documentation/networking/mptcp-sysctl.rst | 2 Documentation/power/runtime_pm.rst | 5 Makefile | 4 arch/arm/include/asm/topology.h | 1 arch/arm/mach-rockchip/platsmp.c | 15 arch/arm/mach-tegra/reset.c | 2 arch/arm64/boot/dts/ti/k3-am62-main.dtsi | 1 arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi | 12 arch/arm64/boot/dts/ti/k3-am62a7-sk.dts | 4 arch/arm64/boot/dts/ti/k3-pinctrl.h | 15 arch/arm64/include/asm/acpi.h | 2 arch/arm64/include/asm/topology.h | 1 arch/arm64/kernel/fpsimd.c | 4 arch/arm64/kernel/topology.c | 26 arch/arm64/kernel/traps.c | 1 arch/arm64/mm/fault.c | 1 arch/arm64/mm/ptdump_debugfs.c | 3 arch/loongarch/kernel/module-sections.c | 36 - arch/loongarch/net/bpf_jit.c | 21 arch/m68k/kernel/head.S | 31 arch/mips/crypto/chacha-core.S | 20 arch/mips/include/asm/vpe.h | 8 arch/mips/kernel/process.c | 16 arch/mips/lantiq/falcon/sysctrl.c | 23 arch/parisc/Makefile | 6 arch/parisc/include/asm/pgtable.h | 7 arch/parisc/include/asm/special_insns.h | 28 arch/parisc/include/asm/uaccess.h | 21 arch/parisc/kernel/cache.c | 6 arch/parisc/kernel/entry.S | 17 arch/parisc/kernel/syscall.S | 30 arch/parisc/lib/memcpy.c | 19 arch/parisc/mm/fault.c | 4 arch/powerpc/boot/Makefile | 1 arch/powerpc/include/asm/floppy.h | 5 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c | 6 arch/riscv/include/asm/topology.h | 1 arch/s390/hypfs/hypfs_dbfs.c | 19 arch/s390/include/asm/timex.h | 13 arch/s390/kernel/time.c | 2 arch/s390/mm/dump_pagetables.c | 2 arch/s390/mm/pgalloc.c | 5 arch/um/include/asm/thread_info.h | 4 arch/um/kernel/process.c | 20 arch/x86/include/asm/kvm-x86-ops.h | 2 arch/x86/include/asm/kvm_host.h | 22 arch/x86/include/asm/msr-index.h | 1 arch/x86/include/asm/xen/hypercall.h | 5 arch/x86/kernel/cpu/bugs.c | 5 arch/x86/kernel/cpu/hygon.c | 3 arch/x86/kvm/hyperv.c | 3 arch/x86/kvm/lapic.c | 19 arch/x86/kvm/lapic.h | 1 arch/x86/kvm/svm/svm.c | 42 - arch/x86/kvm/svm/vmenter.S | 9 arch/x86/kvm/trace.h | 9 arch/x86/kvm/vmx/nested.c | 26 arch/x86/kvm/vmx/pmu_intel.c | 8 arch/x86/kvm/vmx/vmx.c | 164 ++-- arch/x86/kvm/vmx/vmx.h | 31 arch/x86/kvm/x86.c | 46 - block/blk-core.c | 26 block/blk-settings.c | 2 crypto/jitterentropy-kcapi.c | 9 drivers/acpi/acpi_processor.c | 2 drivers/acpi/apei/ghes.c | 13 drivers/acpi/pfr_update.c | 2 drivers/acpi/prmt.c | 26 drivers/acpi/processor_perflib.c | 11 drivers/ata/Kconfig | 35 drivers/ata/libata-sata.c | 5 drivers/ata/libata-scsi.c | 58 - drivers/base/arch_topology.c | 69 + drivers/base/power/runtime.c | 59 + drivers/block/drbd/drbd_receiver.c | 6 drivers/block/loop.c | 38 - drivers/block/sunvdc.c | 4 drivers/bus/mhi/host/boot.c | 8 drivers/bus/mhi/host/internal.h | 4 drivers/bus/mhi/host/main.c | 12 drivers/cdx/controller/cdx_rpmsg.c | 3 drivers/char/ipmi/ipmi_msghandler.c | 8 drivers/char/ipmi/ipmi_watchdog.c | 59 + drivers/char/misc.c | 4 drivers/clk/qcom/gcc-ipq5018.c | 2 drivers/clk/tegra/clk-periph.c | 4 drivers/comedi/comedi_fops.c | 36 - drivers/comedi/comedi_internal.h | 1 drivers/comedi/drivers.c | 36 - drivers/comedi/drivers/pcl726.c | 3 drivers/cpufreq/armada-8k-cpufreq.c | 2 drivers/cpufreq/cppc_cpufreq.c | 2 drivers/cpufreq/cpufreq.c | 12 drivers/cpuidle/governors/menu.c | 96 -- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 8 
drivers/crypto/intel/qat/qat_common/adf_common_drv.h | 1 drivers/crypto/intel/qat/qat_common/adf_init.c | 1 drivers/crypto/intel/qat/qat_common/adf_isr.c | 5 drivers/crypto/intel/qat/qat_common/qat_algs.c | 12 drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c | 16 drivers/devfreq/governor_userspace.c | 6 drivers/dma/stm32-dma.c | 2 drivers/edac/synopsys_edac.c | 97 +- drivers/firmware/tegra/Kconfig | 5 drivers/fpga/zynq-fpga.c | 10 drivers/gpio/gpio-mlxbf2.c | 2 drivers/gpio/gpio-mlxbf3.c | 54 - drivers/gpio/gpio-tps65912.c | 7 drivers/gpio/gpio-virtio.c | 9 drivers/gpio/gpio-wcd934x.c | 7 drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c | 4 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 6 drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c | 57 - drivers/gpu/drm/amd/amdkfd/kfd_module.c | 2 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 6 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 9 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 6 drivers/gpu/drm/amd/display/dc/bios/command_table.c | 2 drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 1 drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c | 2 drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c | 40 - drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c | 31 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 11 drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 3 drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 3 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 6 drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 37 - drivers/gpu/drm/display/drm_dp_helper.c | 2 drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 4 drivers/gpu/drm/i915/intel_runtime_pm.c | 5 drivers/gpu/drm/msm/msm_gem.c | 3 drivers/gpu/drm/msm/msm_gem.h | 6 drivers/gpu/drm/nouveau/nvif/vmm.c | 3 drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi.c | 3 drivers/gpu/drm/ttm/ttm_pool.c | 8 drivers/gpu/drm/ttm/ttm_resource.c | 3 drivers/hid/hid-apple.c | 13 drivers/hid/hid-magicmouse.c | 56 + drivers/hwmon/emc2305.c | 10 drivers/hwmon/gsc-hwmon.c | 4 drivers/i2c/i2c-core-acpi.c | 1 drivers/i3c/internals.h | 1 drivers/i3c/master.c | 4 drivers/idle/intel_idle.c | 2 drivers/iio/adc/ad7768-1.c | 23 drivers/iio/adc/ad_sigma_delta.c | 6 drivers/iio/imu/bno055/bno055.c | 11 drivers/iio/imu/inv_icm42600/inv_icm42600.h | 8 drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c | 33 drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c | 22 drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h | 10 drivers/iio/imu/inv_icm42600/inv_icm42600_core.c | 6 drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c | 43 - drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c | 12 drivers/iio/light/as73211.c | 2 drivers/iio/pressure/bmp280-core.c | 9 drivers/iio/proximity/isl29501.c | 16 drivers/iio/temperature/maxim_thermocouple.c | 26 drivers/infiniband/core/nldev.c | 22 drivers/infiniband/hw/bnxt_re/ib_verbs.c | 8 drivers/infiniband/hw/bnxt_re/qplib_fp.c | 30 drivers/infiniband/hw/bnxt_re/qplib_fp.h | 2 drivers/infiniband/hw/bnxt_re/qplib_res.c | 2 drivers/infiniband/hw/erdma/erdma_verbs.c | 4 drivers/infiniband/hw/hfi1/affinity.c | 44 - drivers/infiniband/sw/siw/siw_qp_tx.c | 5 drivers/iommu/amd/init.c | 4 drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c | 1 drivers/iommu/iommufd/io_pagetable.c | 48 - drivers/leds/flash/leds-qcom-flash.c | 178 ++++ drivers/leds/leds-lp50xx.c | 11 drivers/leds/trigger/ledtrig-netdev.c | 16 drivers/md/dm-ps-historical-service-time.c | 4 drivers/md/dm-ps-queue-length.c | 4 drivers/md/dm-ps-round-robin.c | 4 drivers/md/dm-ps-service-time.c | 4 drivers/md/dm-table.c | 
10 drivers/md/dm-zoned-target.c | 2 drivers/media/cec/usb/rainshadow/rainshadow-cec.c | 3 drivers/media/dvb-frontends/dib7000p.c | 8 drivers/media/i2c/ccs/ccs-core.c | 2 drivers/media/i2c/hi556.c | 26 drivers/media/i2c/ov2659.c | 3 drivers/media/i2c/tc358743.c | 86 +- drivers/media/pci/intel/ivsc/mei_ace.c | 2 drivers/media/pci/intel/ivsc/mei_csi.c | 2 drivers/media/platform/qcom/camss/camss.c | 4 drivers/media/platform/qcom/venus/core.c | 8 drivers/media/platform/qcom/venus/core.h | 2 drivers/media/platform/qcom/venus/hfi_msgs.c | 83 +- drivers/media/platform/qcom/venus/hfi_venus.c | 5 drivers/media/platform/qcom/venus/vdec.c | 5 drivers/media/platform/qcom/venus/venc.c | 5 drivers/media/platform/verisilicon/rockchip_vpu_hw.c | 9 drivers/media/test-drivers/vivid/vivid-ctrls.c | 3 drivers/media/test-drivers/vivid/vivid-vid-cap.c | 4 drivers/media/usb/gspca/vicam.c | 10 drivers/media/usb/hdpvr/hdpvr-i2c.c | 6 drivers/media/usb/usbtv/usbtv-video.c | 4 drivers/media/usb/uvc/uvc_driver.c | 3 drivers/media/usb/uvc/uvc_video.c | 21 drivers/media/v4l2-core/v4l2-common.c | 8 drivers/media/v4l2-core/v4l2-ctrls-core.c | 1 drivers/memstick/core/memstick.c | 1 drivers/memstick/host/rtsx_usb_ms.c | 1 drivers/mfd/axp20x.c | 3 drivers/misc/cardreader/rtsx_usb.c | 16 drivers/misc/mei/bus.c | 6 drivers/mmc/host/rtsx_usb_sdmmc.c | 4 drivers/mmc/host/sdhci-msm.c | 14 drivers/mmc/host/sdhci-pci-gli.c | 35 drivers/most/core.c | 2 drivers/mtd/nand/raw/fsmc_nand.c | 2 drivers/mtd/nand/raw/renesas-nand-controller.c | 6 drivers/mtd/nand/spi/core.c | 5 drivers/mtd/spi-nor/swp.c | 19 drivers/net/bonding/bond_3ad.c | 224 +++++- drivers/net/bonding/bond_main.c | 1 drivers/net/bonding/bond_netlink.c | 16 drivers/net/bonding/bond_options.c | 29 drivers/net/dsa/b53/b53_common.c | 65 + drivers/net/dsa/b53/b53_regs.h | 2 drivers/net/ethernet/agere/et131x.c | 36 + drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 2 drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c | 39 + drivers/net/ethernet/atheros/ag71xx.c | 9 drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 4 drivers/net/ethernet/emulex/benet/be_main.c | 8 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 2 drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c | 4 drivers/net/ethernet/freescale/enetc/enetc_pf.c | 14 drivers/net/ethernet/freescale/fec_main.c | 34 drivers/net/ethernet/freescale/gianfar_ethtool.c | 4 drivers/net/ethernet/google/gve/gve_adminq.c | 1 drivers/net/ethernet/google/gve/gve_main.c | 2 drivers/net/ethernet/intel/igc/igc_main.c | 14 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 4 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 4 drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 2 drivers/net/ethernet/mediatek/mtk_wed.c | 1 drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c | 18 drivers/net/ethernet/mellanox/mlx5/core/en/qos.c | 2 drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c | 4 drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2 drivers/net/ethernet/mellanox/mlxsw/trap.h | 1 drivers/net/ethernet/pensando/ionic/ionic_lif.c | 7 drivers/net/ethernet/ti/icssg/icss_iep.c | 26 drivers/net/hyperv/hyperv_net.h | 3 drivers/net/hyperv/netvsc_drv.c | 29 drivers/net/ipa/ipa_smp2p.c | 2 drivers/net/phy/micrel.c | 12 drivers/net/phy/mscc/mscc.h | 12 drivers/net/phy/mscc/mscc_main.c | 12 drivers/net/phy/mscc/mscc_ptp.c | 49 + drivers/net/phy/smsc.c | 1 drivers/net/ppp/ppp_generic.c | 17 drivers/net/thunderbolt/main.c | 21 drivers/net/usb/asix_devices.c | 1 drivers/net/usb/cdc_ncm.c | 20 
drivers/net/wireless/ath/ath11k/ce.c | 3 drivers/net/wireless/ath/ath11k/dp_rx.c | 3 drivers/net/wireless/ath/ath11k/hal.c | 33 drivers/net/wireless/ath/ath12k/ce.c | 3 drivers/net/wireless/ath/ath12k/dp.c | 3 drivers/net/wireless/ath/ath12k/hal.c | 38 - drivers/net/wireless/ath/ath12k/hw.c | 2 drivers/net/wireless/ath/ath12k/wmi.c | 5 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c | 2 drivers/net/wireless/intel/iwlegacy/4965-mac.c | 5 drivers/net/wireless/intel/iwlwifi/dvm/rs.c | 2 drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 7 drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 2 drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 2 drivers/net/wireless/mediatek/mt76/mt7915/mcu.c | 25 drivers/net/wireless/realtek/rtlwifi/pci.c | 23 drivers/net/wireless/realtek/rtw89/core.c | 3 drivers/net/wireless/realtek/rtw89/fw.c | 9 drivers/net/wireless/realtek/rtw89/fw.h | 2 drivers/net/wireless/realtek/rtw89/mac.c | 19 drivers/net/wireless/realtek/rtw89/reg.h | 1 drivers/net/xen-netfront.c | 5 drivers/nvme/host/pci.c | 24 drivers/pci/controller/dwc/pci-imx6.c | 7 drivers/pci/controller/pcie-rockchip-host.c | 49 - drivers/pci/controller/pcie-rockchip.h | 11 drivers/pci/endpoint/pci-ep-cfs.c | 1 drivers/pci/endpoint/pci-epf-core.c | 2 drivers/pci/pci-acpi.c | 4 drivers/pci/pci.c | 10 drivers/pci/probe.c | 2 drivers/perf/cxl_pmu.c | 2 drivers/phy/qualcomm/phy-qcom-m31.c | 14 drivers/phy/rockchip/phy-rockchip-pcie.c | 3 drivers/pinctrl/stm32/pinctrl-stm32.c | 1 drivers/platform/chrome/cros_ec.c | 3 drivers/platform/chrome/cros_ec_typec.c | 4 drivers/platform/x86/amd/pmc/pmc-quirks.c | 9 drivers/platform/x86/thinkpad_acpi.c | 4 drivers/pmdomain/imx/imx8m-blk-ctrl.c | 10 drivers/power/supply/qcom_battmgr.c | 2 drivers/pps/clients/pps-gpio.c | 5 drivers/ptp/ptp_clock.c | 2 drivers/ptp/ptp_private.h | 5 drivers/ptp/ptp_vclock.c | 7 drivers/pwm/pwm-imx-tpm.c | 9 drivers/pwm/pwm-mediatek.c | 71 + drivers/remoteproc/imx_rproc.c | 4 drivers/reset/Kconfig | 10 drivers/rtc/rtc-ds1307.c | 15 drivers/s390/char/sclp.c | 11 drivers/scsi/aacraid/comminit.c | 3 drivers/scsi/bfa/bfad_im.c | 1 drivers/scsi/libiscsi.c | 3 drivers/scsi/lpfc/lpfc_debugfs.c | 1 drivers/scsi/lpfc/lpfc_scsi.c | 4 drivers/scsi/mpi3mr/mpi3mr.h | 6 drivers/scsi/mpi3mr/mpi3mr_fw.c | 17 drivers/scsi/mpi3mr/mpi3mr_os.c | 22 drivers/scsi/mpt3sas/mpt3sas_scsih.c | 19 drivers/scsi/qla4xxx/ql4_os.c | 2 drivers/scsi/scsi_scan.c | 2 drivers/scsi/scsi_transport_sas.c | 60 + drivers/soc/qcom/mdt_loader.c | 53 + drivers/soc/qcom/rpmh-rsc.c | 2 drivers/soc/tegra/pmc.c | 51 - drivers/soundwire/amd_manager.c | 6 drivers/soundwire/bus.c | 6 drivers/spi/spi-fsl-lpspi.c | 8 drivers/staging/media/imx/imx-media-csc-scaler.c | 2 drivers/target/target_core_fabric_lib.c | 63 + drivers/target/target_core_internal.h | 4 drivers/target/target_core_pr.c | 18 drivers/thermal/qcom/qcom-spmi-temp-alarm.c | 43 - drivers/thermal/thermal_sysfs.c | 9 drivers/thunderbolt/domain.c | 2 drivers/tty/serial/8250/8250_port.c | 3 drivers/tty/vt/defkeymap.c_shipped | 112 +++ drivers/tty/vt/keyboard.c | 2 drivers/ufs/core/ufshcd-priv.h | 2 drivers/ufs/host/ufs-exynos.c | 4 drivers/ufs/host/ufshcd-pci.c | 42 + drivers/usb/atm/cxacru.c | 106 +- drivers/usb/class/cdc-acm.c | 11 drivers/usb/core/config.c | 10 drivers/usb/core/hcd.c | 8 drivers/usb/core/quirks.c | 1 drivers/usb/core/urb.c | 2 drivers/usb/dwc3/dwc3-imx8mp.c | 6 drivers/usb/dwc3/dwc3-meson-g12a.c | 3 drivers/usb/dwc3/dwc3-pci.c | 2 drivers/usb/dwc3/ep0.c | 20 drivers/usb/dwc3/gadget.c | 19 
drivers/usb/gadget/udc/renesas_usb3.c | 1 drivers/usb/host/xhci-hub.c | 3 drivers/usb/host/xhci-mem.c | 24 drivers/usb/host/xhci-pci-renesas.c | 7 drivers/usb/host/xhci-ring.c | 19 drivers/usb/host/xhci.c | 27 drivers/usb/host/xhci.h | 3 drivers/usb/musb/omap2430.c | 14 drivers/usb/storage/realtek_cr.c | 2 drivers/usb/storage/unusual_devs.h | 29 drivers/usb/typec/mux/intel_pmc_mux.c | 2 drivers/usb/typec/tcpm/fusb302.c | 8 drivers/usb/typec/tcpm/maxim_contaminant.c | 54 + drivers/usb/typec/tcpm/tcpci_maxim.h | 1 drivers/usb/typec/ucsi/psy.c | 2 drivers/usb/typec/ucsi/ucsi.c | 1 drivers/usb/typec/ucsi/ucsi.h | 7 drivers/vfio/pci/mlx5/cmd.c | 4 drivers/vfio/vfio_iommu_type1.c | 7 drivers/vhost/vhost.c | 3 drivers/vhost/vsock.c | 6 drivers/video/console/vgacon.c | 2 drivers/video/fbdev/core/fbcon.c | 9 drivers/video/fbdev/core/fbmem.c | 3 drivers/virt/coco/efi_secret/efi_secret.c | 10 drivers/watchdog/dw_wdt.c | 2 drivers/watchdog/iTCO_wdt.c | 6 drivers/watchdog/sbsa_gwdt.c | 50 + fs/btrfs/backref.c | 6 fs/btrfs/block-group.c | 61 + fs/btrfs/block-group.h | 11 fs/btrfs/block-rsv.c | 2 fs/btrfs/block-rsv.h | 2 fs/btrfs/btrfs_inode.h | 3 fs/btrfs/ctree.c | 24 fs/btrfs/ctree.h | 6 fs/btrfs/delayed-inode.c | 12 fs/btrfs/discard.c | 4 fs/btrfs/extent-tree.c | 33 fs/btrfs/file-item.c | 4 fs/btrfs/file-item.h | 2 fs/btrfs/free-space-tree.c | 17 fs/btrfs/inode-item.c | 10 fs/btrfs/inode-item.h | 4 fs/btrfs/inode.c | 26 fs/btrfs/qgroup.c | 31 fs/btrfs/relocation.c | 19 fs/btrfs/send.c | 359 +++++----- fs/btrfs/space-info.c | 17 fs/btrfs/space-info.h | 6 fs/btrfs/tree-log.c | 72 +- fs/btrfs/tree-mod-log.c | 14 fs/btrfs/tree-mod-log.h | 6 fs/btrfs/zoned.c | 20 fs/btrfs/zoned.h | 4 fs/buffer.c | 2 fs/crypto/fscrypt_private.h | 16 fs/crypto/hkdf.c | 2 fs/crypto/keysetup.c | 3 fs/crypto/keysetup_v1.c | 3 fs/eventpoll.c | 60 + fs/exfat/dir.c | 12 fs/exfat/fatent.c | 10 fs/exfat/namei.c | 5 fs/exfat/super.c | 32 fs/ext2/inode.c | 12 fs/ext4/fsmap.c | 23 fs/ext4/indirect.c | 4 fs/ext4/inline.c | 19 fs/ext4/inode.c | 2 fs/ext4/mballoc.c | 67 - fs/ext4/orphan.c | 5 fs/ext4/super.c | 8 fs/f2fs/file.c | 24 fs/f2fs/node.c | 10 fs/file.c | 90 +- fs/gfs2/meta_io.c | 2 fs/hfs/bfind.c | 3 fs/hfs/bnode.c | 93 ++ fs/hfs/btree.c | 57 + fs/hfs/extent.c | 2 fs/hfs/hfs_fs.h | 1 fs/hfsplus/bnode.c | 92 ++ fs/hfsplus/unicode.c | 7 fs/hfsplus/xattr.c | 6 fs/hugetlbfs/inode.c | 2 fs/jbd2/checkpoint.c | 1 fs/jfs/file.c | 3 fs/jfs/inode.c | 2 fs/jfs/jfs_dmap.c | 6 fs/libfs.c | 4 fs/namespace.c | 34 fs/nfs/blocklayout/blocklayout.c | 4 fs/nfs/blocklayout/dev.c | 5 fs/nfs/blocklayout/extent_tree.c | 20 fs/nfs/client.c | 44 + fs/nfs/internal.h | 2 fs/nfs/nfs4client.c | 20 fs/nfs/nfs4proc.c | 2 fs/nfs/pnfs.c | 11 fs/nfsd/nfs4state.c | 34 fs/ntfs3/dir.c | 3 fs/ntfs3/inode.c | 31 fs/orangefs/orangefs-debugfs.c | 2 fs/smb/client/cifssmb.c | 10 fs/smb/client/connect.c | 10 fs/smb/client/sess.c | 9 fs/smb/client/smb2ops.c | 11 fs/smb/client/smbdirect.c | 24 fs/smb/server/connection.c | 3 fs/smb/server/connection.h | 7 fs/smb/server/oplock.c | 13 fs/smb/server/smb2pdu.c | 16 fs/smb/server/transport_rdma.c | 5 fs/smb/server/transport_rdma.h | 4 fs/smb/server/transport_tcp.c | 26 fs/squashfs/super.c | 14 fs/tracefs/inode.c | 11 fs/udf/super.c | 13 fs/xfs/xfs_itable.c | 6 include/linux/arch_topology.h | 8 include/linux/blk_types.h | 6 include/linux/compiler.h | 8 include/linux/cpufreq.h | 1 include/linux/energy_model.h | 6 include/linux/fs.h | 4 include/linux/hypervisor.h | 3 include/linux/if_vlan.h | 21 include/linux/iosys-map.h | 7 
include/linux/memfd.h | 14 include/linux/mm.h | 82 +- include/linux/pci.h | 10 include/linux/pm_runtime.h | 18 include/linux/sched/topology.h | 8 include/linux/skbuff.h | 8 include/linux/usb/cdc_ncm.h | 1 include/linux/virtio_vsock.h | 7 include/net/bond_3ad.h | 3 include/net/bond_options.h | 1 include/net/bonding.h | 23 include/net/cfg80211.h | 2 include/net/mac80211.h | 2 include/net/neighbour.h | 1 include/net/net_namespace.h | 16 include/net/sock.h | 1 include/trace/events/btrfs.h | 6 include/trace/events/thp.h | 2 include/uapi/linux/if_link.h | 1 include/uapi/linux/in6.h | 4 include/uapi/linux/io_uring.h | 2 include/uapi/linux/pfrut.h | 1 io_uring/net.c | 19 kernel/bpf/verifier.c | 3 kernel/cgroup/cpuset.c | 2 kernel/fork.c | 2 kernel/module/main.c | 10 kernel/power/console.c | 7 kernel/rcu/tree.c | 2 kernel/rcu/tree.h | 14 kernel/rcu/tree_plugin.h | 44 - kernel/sched/cpufreq_schedutil.c | 30 kernel/sched/fair.c | 19 kernel/trace/ftrace.c | 19 kernel/trace/trace.c | 33 kernel/trace/trace.h | 10 mm/debug_vm_pgtable.c | 9 mm/filemap.c | 2 mm/kmemleak.c | 10 mm/madvise.c | 2 mm/memfd.c | 2 mm/memory-failure.c | 8 mm/mmap.c | 12 mm/ptdump.c | 2 mm/shmem.c | 2 net/bluetooth/hci_conn.c | 3 net/bluetooth/hci_event.c | 8 net/bluetooth/hci_sock.c | 2 net/bridge/br_multicast.c | 16 net/bridge/br_private.h | 2 net/core/dev.c | 12 net/core/neighbour.c | 12 net/core/net_namespace.c | 8 net/core/sock.c | 27 net/hsr/hsr_slave.c | 8 net/ipv4/netfilter/nf_reject_ipv4.c | 6 net/ipv4/route.c | 1 net/ipv4/udp_offload.c | 2 net/ipv6/addrconf.c | 7 net/ipv6/mcast.c | 11 net/ipv6/netfilter/nf_reject_ipv6.c | 5 net/ipv6/seg6_hmac.c | 6 net/mac80211/cfg.c | 12 net/mac80211/chan.c | 1 net/mac80211/mlme.c | 9 net/mac80211/rx.c | 12 net/mctp/af_mctp.c | 26 net/mptcp/options.c | 6 net/mptcp/pm_netlink.c | 19 net/mptcp/subflow.c | 5 net/ncsi/internal.h | 2 net/ncsi/ncsi-rsp.c | 1 net/netfilter/nf_conntrack_netlink.c | 24 net/netlink/af_netlink.c | 12 net/rds/tcp.c | 8 net/sched/sch_cake.c | 14 net/sched/sch_ets.c | 36 - net/sched/sch_htb.c | 2 net/sctp/input.c | 2 net/smc/af_smc.c | 8 net/sunrpc/svcsock.c | 5 net/sunrpc/xprtsock.c | 8 net/tls/tls.h | 2 net/tls/tls_strp.c | 11 net/tls/tls_sw.c | 10 net/vmw_vsock/virtio_transport.c | 14 net/wireless/mlme.c | 3 net/xfrm/xfrm_state.c | 72 +- scripts/kconfig/gconf.c | 8 scripts/kconfig/lxdialog/inputbox.c | 6 scripts/kconfig/lxdialog/menubox.c | 2 scripts/kconfig/nconf.c | 2 scripts/kconfig/nconf.gui.c | 1 security/apparmor/file.c | 6 security/apparmor/include/lib.h | 6 security/inode.c | 2 sound/core/pcm_native.c | 19 sound/hda/hdac_device.c | 2 sound/pci/hda/hda_codec.c | 42 - sound/pci/hda/patch_ca0132.c | 2 sound/pci/hda/patch_realtek.c | 4 sound/pci/intel8x0.c | 2 sound/soc/codecs/hdac_hdmi.c | 10 sound/soc/codecs/rt5640.c | 5 sound/soc/fsl/fsl_sai.c | 20 sound/soc/intel/avs/core.c | 3 sound/soc/qcom/lpass-platform.c | 27 sound/soc/soc-core.c | 3 sound/soc/soc-dapm.c | 4 sound/usb/mixer_quirks.c | 14 sound/usb/stream.c | 25 sound/usb/validate.c | 14 tools/bpf/bpftool/main.c | 6 tools/include/nolibc/std.h | 4 tools/include/nolibc/types.h | 4 tools/include/uapi/linux/if_link.h | 1 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c | 4 tools/scripts/Makefile.include | 4 tools/testing/ktest/ktest.pl | 5 tools/testing/selftests/arm64/fp/sve-ptrace.c | 3 tools/testing/selftests/bpf/prog_tests/user_ringbuf.c | 10 tools/testing/selftests/bpf/progs/verifier_unpriv.c | 2 tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc | 2 
tools/testing/selftests/futex/include/futextest.h | 11 tools/testing/selftests/memfd/memfd_test.c | 43 + tools/testing/selftests/net/mptcp/pm_netlink.sh | 1 612 files changed, 6026 insertions(+), 2723 deletions(-)
Aahil Awatramani (1): bonding: Add independent control state machine
Aakash Kumar S (1): xfrm: Duplicate SPI Handling
Aaron Kling (1): ARM: tegra: Use I/O memcpy to write to IRAM
Aaron Plattner (1): watchdog: sbsa: Adjust keepalive timeout to avoid MediaTek WS0 race condition
Abel Vesa (1): power: supply: qcom_battmgr: Add lithium-polymer entry
Aditya Garg (2):
      HID: magicmouse: avoid setting up battery timer when not needed
      HID: apple: avoid setting up battery timer for devices without battery
Adrian Hunter (1): scsi: ufs: ufs-pci: Fix default runtime and system PM levels
Al Viro (5):
      better lockdep annotations for simple_recursive_removal()
      fix locking in efi_secret_unlink()
      securityfs: don't pin dentries twice, once is enough...
      use uniform permission checks for all mount propagation changes
      alloc_fdtable(): change calling conventions.
Alex Deucher (1): drm/amdgpu: update mmhub 3.0.1 client id mappings
Alex Guo (2):
      media: dvb-frontends: dib7090p: fix null-ptr-deref in dib7090p_rw_on_apb()
      media: dvb-frontends: w7090p: fix null-ptr-deref in w7090p_tuner_write_serpar and w7090p_tuner_read_serpar
Alexander Kochetkov (1): ARM: rockchip: fix kernel hang during smp initialization
Alexander Sverdlin (1): arm64: dts: ti: k3-pinctrl: Enable Schmitt Trigger by default
Alexander Wilhelm (1): bus: mhi: host: Fix endianness of BHI vector table
Alexey Klimov (1): iommu/arm-smmu-qcom: Add SM6115 MDSS compatible
Alok Tiwari (6):
      net: ti: icss-iep: Fix incorrect type for return value in extts_enable()
      ALSA: intel8x0: Fix incorrect codec index usage in mixer for ICH4
      be2net: Use correct byte order and format string for TCP seq and ack_seq
      net: thunderx: Fix format-truncation warning in bgx_acpi_match_id()
      perf/cxlpmu: Remove unintended newline from IRQ name format string
      gve: Return error for unknown admin queue command
Amber Lin (1): drm/amdkfd: Destroy KFD debugfs after destroy KFD wq
Amelie Delaunay (1): dmaengine: stm32-dma: configure next sg only if there are more than 2 sgs
Amir Mohammad Jahangirzad (1): fs/orangefs: use snprintf() instead of sprintf()
Amit Sunil Dhamne (2):
      usb: typec: maxim_contaminant: re-enable cc toggle if cc is open and port is clean
      usb: typec: maxim_contaminant: disable low power mode when reading comparator values
Anantha Prabhu (1): RDMA/bnxt_re: Fix to initialize the PBL array
Andreas Dilger (1): ext4: check fast symlink for ea_inode correctly
Andrew Price (1): gfs2: Set .migrate_folio in gfs2_{rgrp,meta}_aops
André Draszik (1): scsi: ufs: exynos: Fix programming of HCI_UTRL_NEXUS_TYPE
Andy Shevchenko (2):
      Documentation: ACPI: Fix parent device references
      iio: imu: inv_icm42600: Convert to uXX and sXX integer types
Anshuman Khandual (1): mm/ptdump: take the memory hotplug lock inside ptdump_walk_pgd()
Anthoine Bourgeois (1): xen/netfront: Fix TX response spurious interrupts
Archana Patni (1): scsi: ufs: ufs-pci: Fix hibernate state transition for Intel MTL-like host controllers
Armen Ratner (1): net/mlx5e: Preserve shared buffer capacity during headroom updates
Arnaud Lecomte (1): jfs: upper bound check of tree index in dbAllocAG
Arnd Bergmann (1): RDMA/core: reduce stack using in nldev_stat_get_doit()
Artem Sadovnikov (1): vfio/mlx5: fix possible overflow in tracking max message size
Avraham Stern (1): wifi: iwlwifi: mvm: fix scan request validation
Baihan Li (1): drm/hisilicon/hibmc: fix the hibmc loaded failed bug
Baokun Li (4):
      ext4: fix zombie groups in average fragment size lists
      ext4: fix largest free orders lists corruption on mb_optimize_scan switch
      jbd2: prevent softlockup in jbd2_log_do_checkpoint()
      ext4: preserve SB_I_VERSION on remount
Bartosz Golaszewski (2):
      gpio: wcd934x: check the return value of regmap_update_bits()
      gpio: tps65912: check the return value of regmap_update_bits()
Benjamin Marzinski (1): dm-table: fix checking for rq stackable devices
Benson Leung (1): usb: typec: ucsi: psy: Set current max to 100mA for BC 1.2 and Default
Bharat Bhushan (1): crypto: octeontx2 - add timeout for load_fvc completion poll
Biju Das (1): net: phy: micrel: Add ksz9131_resume()
Bingbu Cao (1): media: hi556: correct the test pattern configuration
Bitterblue Smith (3):
      wifi: rtw89: Lower the timeout in rtw89_fw_read_c2h_reg() for USB
      wifi: rtw89: Fix rtw89_mac_power_switch() for USB
      wifi: rtw89: Disable deep power saving for USB/SDIO
Bjorn Andersson (2):
      soc: qcom: mdt_loader: Actually use the e_phoff
      soc: qcom: mdt_loader: Ensure we don't read past the ELF header
Bjorn Helgaas (1): mmc: sdhci-pci-gli: Use PCI AER definitions, not hard-coded values
Boris Burkov (1): btrfs: fix ssd_spread overallocation
Boshi Yu (1): RDMA/erdma: Fix ignored return value of init_kernel_qp
Breno Leitao (5):
      ACPI: APEI: GHES: add TAINT_MACHINE_CHECK on GHES panic path
      arm64: Mark kernel as tainted on SAE and SError panic
      ptp: Use ratelimite for freerun error message
      ipmi: Use dev_warn_ratelimited() for incorrect message warnings
      mm/kmemleak: avoid deadlock by moving pr_warn() outside kmemleak_lock
Buday Csaba (1): net: phy: smsc: add proper reset flags for LAN8710A
Cezary Rojewski (1): ASoC: Intel: avs: Fix uninitialized pointer error in probe()
Chao Gao (1): KVM: nVMX: Defer SVI update to vmcs01 on EOI when L2 is active w/o VID
Chao Yu (1): f2fs: fix to avoid out-of-boundary access in dnode page
Charles Keepax (1): soundwire: Move handle_nested_irq outside of sdw_dev_lock
Cheick Traore (1): pinctrl: stm32: Manage irq affinity settings
Chen Yu (1): ACPI: pfr_update: Fix the driver update version check
Chen-Yu Tsai (1): mfd: axp20x: Set explicit ID for AXP313 regulator
Chenyuan Yang (1): drm/amd/display: Add null pointer check in mod_hdcp_hdcp1_create_session()
Chris Mason (1): sched/fair: Bump sd->max_newidle_lb_cost when newidle balance fails
Christian Loehle (1): cpuidle: menu: Remove iowait influence
Christoph Hellwig (2):
      block: reject invalid operation in submit_bio_noacct
      xfs: fully decouple XFS_IBULK* flags from XFS_IWALK* flags
Christoph Paasch (1): mptcp: drop skb if MPTCP skb extension allocation fails
Christophe Leroy (1): ALSA: pcm: Rewrite recalculate_boundary() to avoid costly loop
Christopher Eby (1): ALSA: hda/realtek: Add Framework Laptop 13 (AMD Ryzen AI 300) to quirks
Corey Minyard (1): ipmi: Fix strcpy source and destination the same
Cristian Ciocaltea (1): ALSA: usb-audio: Avoid precedence issues in mixer_quirks macros
Cynthia Huang (1): selftests/futex: Define SYS_futex on 32-bit architectures with 64-bit time_t
D. Wythe (1): net/smc: fix UAF on smcsk after smc_listen_out()
Dai Ngo (1): NFSD: detect mismatch of file handle and delegation stateid in OPEN op
Damien Le Moal (9):
      ata: libata-sata: Disallow changing LPM state if not supported
      scsi: mpt3sas: Correctly handle ATA device errors
      scsi: mpi3mr: Correctly handle ATA device errors
      ata: libata-scsi: Fix ata_to_sense_error() status handling
      PCI: endpoint: Fix configfs group list head handling
      PCI: endpoint: Fix configfs group removal on driver teardown
      block: Make REQ_OP_ZONE_FINISH a write operation
      ata: Fix SATA_MOBILE_LPM_POLICY description in Kconfig
      ata: libata-scsi: Return aborted command when missing sense and result TF
Dan Carpenter (5):
      cpufreq: armada-8k: Fix off by one in armada_8k_cpufreq_free_table()
      media: gspca: Add bounds checking to firmware parser
      soc: qcom: mdt_loader: Fix error return values in mdt_header_valid()
      scsi: qla4xxx: Prevent a potential error pointer dereference
      ALSA: usb-audio: Fix size validation in convert_chmap_v3()
Daniel Golle (1): Revert "leds: trigger: netdev: Configure LED blink interval for HW offload"
Daniel Jurgens (1): net/mlx5: Base ECVF devlink port attrs from 0
Dave Stevenson (3):
      media: tc358743: Check I2C succeeded during probe
      media: tc358743: Return an appropriate colorspace from tc358743_set_fmt
      media: tc358743: Increase FIFO trigger level to 374
David Bauer (1): wifi: mt76: mt7915: mcu: re-init MCU before loading FW patch
David Collins (1): thermal/drivers/qcom-spmi-temp-alarm: Enable stage 2 shutdown when required
David Lechner (6):
      iio: adc: ad_sigma_delta: don't overallocate scan buffer
      iio: imu: bno055: fix OOB access of hw_xlate array
      iio: adc: ad_sigma_delta: change to buffer predisable
      iio: proximity: isl29501: fix buffered read on big-endian systems
      iio: temperature: maxim_thermocouple: use DMA-safe buffer for spi_read()
      iio: imu: inv_icm42600: use = { } instead of memset()
David Sterba (3):
      btrfs: move transaction aborts to the error site in add_block_group_free_space()
      btrfs: open code timespec64 in struct btrfs_inode
      btrfs: constify more pointer parameters
David Thompson (3):
      gpio: mlxbf2: use platform_get_irq_optional()
      Revert "gpio: mlxbf3: only get IRQ for device instance 0"
      gpio: mlxbf3: use platform_get_irq_optional()
Davide Caratti (1): net/sched: ets: use old 'nbands' while purging unused classes
Edward Adam Davis (2):
      jfs: Regular file corruption check
      comedi: pcl726: Prevent invalid irq number
Eliav Farber (1): pps: clients: gpio: fix interrupt handling order in remove path
Emanuele Ghidoli (1): arm64: dts: ti: k3-am62-verdin: Enable pull-ups on I2C buses
Emily Deng (1): drm/ttm: Should to return the evict error
Eric Biggers (4):
      thunderbolt: Fix copy+paste error in match_service_id()
      lib/crypto: mips/chacha: Fix clang build and remove unneeded byteswap
      ipv6: sr: Fix MAC comparison to be constant-time
      fscrypt: Don't use problematic non-inline crypto engines
Eric Dumazet (2):
      net: better track kernel sockets lifetime
      net_sched: sch_ets: implement lockless ets_dump()
Eric Work (1): net: atlantic: add set_power to fw_ops for atl2 to fix wol
Evgeniy Harchenko (1): ALSA: hda/realtek: Add support for HP EliteBook x360 830 G6 and EliteBook 830 G6
Fanhua Li (1): drm/nouveau/nvif: Fix potential memory leak in nvif_vmm_ctor().
Fedor Pchelkin (1): netlink: avoid infinite retry looping in netlink_unicast()
Fenglin Wu (1): leds: flash: leds-qcom-flash: Limit LED current based on thermal condition
Filipe Manana (14):
      btrfs: abort transaction during log replay if walk_log_tree() failed
      btrfs: clear dirty status from extent buffer on error at insert_new_root()
      btrfs: fix log tree replay failure due to file with 0 links and extents
      btrfs: don't ignore inode missing when replaying log tree
      btrfs: qgroup: fix race between quota disable and quota rescan ioctl
      btrfs: always abort transaction on failure to add block group to free space tree
      btrfs: abort transaction on unexpected eb generation at btrfs_copy_root()
      btrfs: send: factor out common logic when sending xattrs
      btrfs: send: only use boolean variables at process_recorded_refs()
      btrfs: send: add and use helper to rename current inode when processing refs
      btrfs: send: keep the current inode's path cached
      btrfs: send: avoid path allocation for the current inode when issuing commands
      btrfs: send: use fallocate for hole punching with send stream v2
      btrfs: send: make fs_path_len() inline and constify its argument
Finn Thain (1): m68k: Fix lost column on framebuffer debug console
Florian Larysch (1): net: phy: micrel: fix KSZ8081/KSZ8091 cable test
Florian Westphal (2):
      netfilter: ctnetlink: fix refcount leak on table dump
      netfilter: nf_reject: don't leak dst refcount for loopback packets
Florin Leotescu (1): hwmon: (emc2305) Set initial PWM minimum value during probe based on thermal state
Frederic Weisbecker (1): rcu: Fix racy re-initialization of irq_work causing hangs
Gabor Juhos (1): mtd: spinand: propagate spinand_wait() errors from spinand_write_page()
Gabriel Totev (1): apparmor: shift ouid when mediating hard links in userns
Gal Pressman (2):
      net: vlan: Make is_vlan_dev() a stub when VLAN is not configured
      net: vlan: Replace BUG() with WARN_ON_ONCE() in vlan_dev_* stubs
Gang Ba (1): drm/amdgpu: Avoid extra evict-restore process.
Gautham R. Shenoy (1): pm: cpupower: Fix the snapshot-order of tsc,mperf, clock in mperf_stop()
Geliang Tang (2):
      mptcp: remove duplicate sk_reset_timer call
      mptcp: disable add_addr retransmission when timeout is 0
George Moussalem (1): clk: qcom: ipq5018: keep XO clock always on
Gerald Schaefer (1): s390/mm: Remove possible false-positive warning in pte_free_defer()
Geraldo Nascimento (3):
      phy: rockchip-pcie: Properly disable TEST_WRITE strobe signal
      PCI: rockchip: Use standard PCIe definitions
      PCI: rockchip: Set Target Link Speed to 5.0 GT/s before retraining
Giovanni Cabiddu (2):
      crypto: qat - lower priority for skcipher and aead algorithms
      crypto: qat - flush misc workqueue during device shutdown
Greg Kroah-Hartman (1): Linux 6.6.103
Gui-Dong Han (1): media: rainshadow-cec: fix TOCTOU race condition in rain_interrupt()
Haiyang Zhang (1): hv_netvsc: Fix panic during namespace deletion with VF
Hangbin Liu (2):
      bonding: update LACP activity flag after setting lacp_active
      bonding: send LACPDUs periodically in passive mode after receiving partner's LACPDU
Hans Verkuil (1): media: vivid: fix wrong pixel_array control size
Hans de Goede (2):
      mei: bus: Check for still connected devices in mei_cl_bus_dev_release()
      media: ivsc: Fix crash at shutdown due to missing mei_cldev_disable() calls
Haoran Jiang (1): LoongArch: BPF: Fix jump offset calculation in tailcall
Haoxiang Li (1): media: imx: fix a potential memory leak in imx_media_csc_scaler_device_init()
Harald Mommer (1): gpio: virtio: Fix config space reading.
Hari Chandrakanthan (1): wifi: mac80211: fix rx link assignment for non-MLO stations
Hari Kalavakunta (1): net: ncsi: Fix buffer overflow in fetching version id
Hariprasad Kelam (1): Octeontx2-af: Skip overlap check for SPI field
Heikki Krogerus (1): usb: dwc3: pci: add support for the Intel Wildcat Lake
Heiner Kallweit (1): dpaa_eth: don't use fixed_phy_change_carrier
Helge Deller (1): Revert "vgacon: Add check for vc_origin address range in vgacon_scroll()"
Herton R. Krzesinski (1): mm/debug_vm_pgtable: clear page table entries at destroy_args()
Hiago De Franco (1): remoteproc: imx_rproc: skip clock enable when M-core is managed by the SCU
Hong Guan (1): arm64: dts: ti: k3-am62a7-sk: fix pinmux for main_uart1
Horatiu Vultur (1): phy: mscc: Fix timestamping for vsc8584
Hsin-Te Yuan (1): thermal: sysfs: Return ENODATA instead of EAGAIN for reads
Huacai Chen (1): PCI: Extend isolated function probing to LoongArch
Ian Abbott (3):
      comedi: fix race between polling and detaching
      comedi: Make insn_rw_emulate_bits() do insn->n samples
      comedi: Fix use of uninitialized memory in do_insn_ioctl() and do_insnlist_ioctl()
Ido Schimmel (1): mlxsw: spectrum: Forward packets with an IPv4 link-local source IP
Igor Pylypiv (1): ata: libata-scsi: Fix CDL control
Ihor Solodrai (1): bpf: Make reg_not_null() true for CONST_PTR_TO_MAP
Ilan Peer (1): wifi: cfg80211: Fix interface type validation
Ilya Bakoulin (1): drm/amd/display: Separate set_gsl from set_gsl_source_select
Imre Deak (1): drm/dp: Change AUX DPCD probe address from DPCD_REV to LANE0_1_STATUS
Isaac J. Manjarres (4):
      mm: drop the assumption that VM_SHARED always implies writable
      mm: update memfd seal write check to include F_SEAL_WRITE
      mm: reinstate ability to map write-sealed memfd mappings read-only
      selftests/memfd: add test for mapping write-sealed memfd read-only
Jack Xiao (1): drm/amdgpu: fix incorrect vm flags to map bo
Jaegeuk Kim (1): f2fs: check the generic conditions first
Jakub Acs (1): net, hsr: reject HSR frame if skb can't hold tag
Jakub Kicinski (3):
      tls: handle data disappearing from under the TLS ULP
      uapi: in6: restore visibility of most IPv6 socket options
      tls: fix handling of zero-length records on the rx_list
Jakub Ramaseuski (1): net: gso: Forbid IPv6 TSO with extensions on devices with only IPV6_CSUM
Jan Beulich (1): compiler: remove __ADDRESSABLE_ASM{_STR,}() again
Jan Kara (2):
      loop: Avoid updating block size under exclusive owner
      udf: Verify partition map count
Jann Horn (1): eventpoll: Fix semi-unbounded recursion
Jason Gunthorpe (1): iommufd: Prevent ALIGN() overflow
Jason Wang (1): vhost: fail early when __vhost_add_used() fails
Jason Xing (1): ixgbe: xsk: resolve the negative overflow of budget in ixgbe_xmit_zc
Jay Chen (1): usb: xhci: Set avg_trb_len = 8 for EP0 during Address Device Command
Jean-Baptiste Maneyrol (1): iio: imu: inv_icm42600: change invalid data error to -EBUSY
Jeff Layton (1): nfsd: handle get_client_locked() failure in nfsd4_setclientid_confirm()
Jens Axboe (1): io_uring/net: commit partial buffers on retry
Jeongjun Park (1): ptp: prevent possible ABBA deadlock in ptp_clock_freerun()
Jiasheng Jiang (1): scsi: lpfc: Remove redundant assignment to avoid memory leak
Jiayi Li (2):
      ACPI: processor: perflib: Fix initial _PPC limit application
      memstick: Fix deadlock by moving removing flag earlier
Jinjiang Tu (1): mm/memory-failure: fix infinite UCE for VM_PFNMAP pfn
Joel Fernandes (1): rcu: Fix rcu_read_unlock() deadloop due to IRQ work
Johan Adolfsson (1): leds: leds-lp50xx: Handle reg to get correct multi_index
Johan Hovold (15):
      net: gianfar: fix device leak when querying time stamp info
      net: enetc: fix device and OF node leak at probe
      net: mtk_eth_soc: fix device leak at probe
      net: ti: icss-iep: fix device and OF node leaks at probe
      net: dpaa: fix device leak when querying time stamp info
      usb: gadget: udc: renesas_usb3: fix device leak at unbind
      usb: musb: omap2430: fix device leak at unbind
      usb: dwc3: meson-g12a: fix device leaks at unbind
      wifi: ath12k: fix dest ring-buffer corruption
      wifi: ath12k: fix source ring-buffer corruption
      wifi: ath12k: fix dest ring-buffer corruption when ring is full
      wifi: ath11k: fix dest ring-buffer corruption
      wifi: ath11k: fix source ring-buffer corruption
      wifi: ath11k: fix dest ring-buffer corruption when ring is full
      usb: dwc3: imx8mp: fix device leak at unbind
Johannes Berg (2):
      wifi: cfg80211: reject HTC bit for management frames
      wifi: mac80211: don't complete management TX on SAE commit
Johannes Thumshirn (1): btrfs: zoned: use filesystem size not disk size for reclaim decision
John David Anglin (8):
      parisc: Check region is readable by user in raw_copy_from_user()
      parisc: Define and use set_pte_at()
      parisc: Drop WARN_ON_ONCE() from flush_cache_vmap
      parisc: Rename pte_needs_flush() to pte_needs_cache_flush() in cache.c
      parisc: Revise __get_user() to probe user read access
      parisc: Revise gateway LWS calls to probe user read access
      parisc: Try to fixup kernel exception in bad_area_nosemaphore path of do_page_fault()
      parisc: Update comments in make_insert_tlb
John Garry (2):
      scsi: aacraid: Stop using PCI_IRQ_AFFINITY
      block: avoid possible overflow for chunk_sectors check in blk_stack_limits()
Jon Hunter (1): soc/tegra: pmc: Ensure power-domains are in a known state
Jonas Rebmann (1): net: fec: allow disable coalescing
Jonathan Cameron (2):
      iio: light: as73211: Ensure buffer holes are zeroed
      iio: imu: inv_icm42600: switch timestamp type from int64_t __aligned(8) to aligned_s64
Jonathan Santos (1): iio: adc: ad7768-1: Ensure SYNC_IN pulse minimum timing requirement
Jordan Rhee (1): gve: prevent ethtool ops after shutdown
Jorge Marques (1): i3c: master: Initialize ret in i3c_i2c_notifier_call()
Jorge Ramirez-Ortiz (2):
      media: venus: hfi: explicitly release IRQ during teardown
      media: venus: protect against spurious interrupts during probe
Judith Mendez (1): arm64: dts: ti: k3-am62-main: Remove eMMC High Speed DDR support
Justin Tee (1): scsi: lpfc: Check for hdwq null ptr when cleaning up lpfc_vport structure
Kanglong Wang (1): LoongArch: Optimize module load time by optimizing PLT/GOT counting
Karthikeyan Kathirvel (1): wifi: ath12k: Decrement TID on RX peer frag setup error handling
Kashyap Desai (2):
      RDMA/bnxt_re: Fix to do SRQ armena by default
      RDMA/bnxt_re: Fix to remove workload check in SRQ limit path
Kathiravan Thirumoorthy (1): phy: qcom: phy-qcom-m31: Update IPQ5332 M31 USB phy initialization sequence
Kees Cook (3):
      arm64: Handle KCOV __init vs inline mismatches
      platform/x86: thinkpad_acpi: Handle KCOV __init vs inline mismatches
      iommu/amd: Avoid stack buffer overflow from kernel cmdline
Keith Busch (2):
      nvme-pci: try function level reset on init failure
      vfio/type1: conditional rescheduling while pinning
Krzysztof Hałasa (1): imx8m-blk-ctrl: set ISI panic write hurry level
Krzysztof Kozlowski (3):
      dt-bindings: display: sprd,sharkl3-dpu: Fix missing clocks constraints
      dt-bindings: display: sprd,sharkl3-dsi-host: Fix missing clocks constraints
      leds: flash: leds-qcom-flash: Fix registry access after re-bind
Kuen-Han Tsai (1): usb: dwc3: Ignore late xferNotReady event to prevent halt timeout
Kuninori Morimoto (1): ASoC: soc-dapm: set bias_level if snd_soc_dapm_set_bias_level() was successed
Kuniyuki Iwashima (2):
      ipv6: mcast: Check inet6_dev->dead under idev->mc_lock in __ipv6_dev_mc_inc().
      net: Add net_passive_inc() and net_passive_dec().
Lad Prabhakar (1): drm: renesas: rz-du: mipi_dsi: Add min check for VCLK range
Laurentiu Mihalcea (1): pwm: imx-tpm: Reset counter if CMOD is 0
Len Brown (1): intel_idle: Allow loading ACPI tables for any family
Leon Romanovsky (1): net/mlx5e: Properly access RCU protected qdisc_sleeping variable
Liao Yuanhong (1): ext4: use kmalloc_array() for array space allocation
Lifeng Zheng (2):
      cpufreq: Exit governor when failed to start old governor
      PM / devfreq: governor: Replace sscanf() with kstrtoul() in set_freq_store()
Lizhi Xu (2):
      fs/ntfs3: Add sanity check for file name
      jfs: truncate good inode pages when hard link is 0
Lucy Thrun (1): ALSA: hda/ca0132: Fix buffer overflow in add_tuning_control
Ludwig Disterhof (1): media: usbtv: Lock resolution while streaming
Lukas Wunner (1): PCI/ACPI: Fix runtime PM ref imbalance on Hot-Plug Capable ports
Ma Ke (1): sunvdc: Balance device refcount in vdc_port_mpgroup_check
Mael GUERIN (1): USB: storage: Add unusual-devs entry for Novatek NTK96550-based camera
Manuel Andreas (1): KVM: x86/hyper-v: Skip non-canonical addresses during PV TLB flush
Marek Szyprowski (1): zynq_fpga: use sgtable-based scatterlist wrappers
Marek Vasut (1): usb: renesas-xhci: Fix External ROM access timeouts
Mario Limonciello (8):
      platform/x86/amd: pmc: Add Lenovo Yoga 6 13ALC6 to pmc quirk list
      usb: xhci: Avoid showing warnings for dying controller
      usb: xhci: Avoid showing errors during surprise removal
      drm/amd: Allow printing VanGogh OD SCLK levels without setting dpm to manual
      drm/amd/display: Only finalize atomic_obj if it was initialized
      drm/amd/display: Avoid configuring PSR granularity if PSR-SU not supported
      drm/amd: Restore cached power limit during resume
      drm/amd/display: Avoid a NULL pointer dereference
Mark Brown (2):
      ASoC: hdac_hdmi: Rate limit logging on connection and disconnection
      kselftest/arm64: Specify SVE data when testing VL set in sve-ptrace
Markus Theil (1): crypto: jitter - fix intermediary handling
Masahiro Yamada (2):
      kconfig: gconf: avoid hardcoding model2 in on_treeview2_cursor_changed()
      kconfig: gconf: fix potential memory leak in renderer_edited()
Masami Hiramatsu (Google) (2):
      selftests: tracing: Use mutex_unlock for testing glob filter
      tracing: fprobe-event: Sanitize wildcard for fprobe event name
Mateusz Guzik (1): apparmor: use the condition in AA_BUG_FMT even with debug disabled
Matt Johnston (1): net: mctp: Prevent duplicate binds
Matthieu Baerts (NGI0) (2):
      mptcp: pm: kernel: flush: do not reset ADD_ADDR limit
      selftests: mptcp: pm: check flush doesn't reset limits
Maulik Shah (1): soc: qcom: rpmh-rsc: Add RSC version 4 support
Maurizio Lombardi (1): scsi: target: core: Generate correct identifiers for PR OUT transport IDs
Maxim Levitsky (3):
      KVM: nVMX: Check vmcs12->guest_ia32_debugctl on nested VM-Enter
      KVM: VMX: Wrap all accesses to IA32_DEBUGCTL with getter/setter APIs
      KVM: VMX: Preserve host's DEBUGCTLMSR_FREEZE_IN_SMM while running the guest
Meagan Lloyd (2):
      rtc: ds1307: handle oscillator stop flag (OSF) for ds1341
      rtc: ds1307: remove clear of oscillator stop flag (OSF) in probe
Miao Li (1): usb: quirks: Add DELAY_INIT quick for another SanDisk 3.2Gen1 Flash Drive
Miaoqian Lin (1): most: core: Drop device reference after usage in get_channel()
Michael Walle (1): mtd: spi-nor: Fix spi_nor_try_unlock_all()
Michal Suchanek (1): powerpc/boot: Fix build with gcc 15
Michel Dänzer (1): drm/amd/display: Add primary plane to commits for correct VRR handling
Mikhail Lobanov (1): wifi: mac80211: check basic rates validity in sta_link_apply_parameters
Mikulas Patocka (1): dm-mpath: don't print the "loaded" message if registering fails
Mina Almasry (1): netmem: fix skb_frag_address_safe with unreadable skbs
Minhong He (1): ipv6: sr: validate HMAC algorithm ID in seg6_hmac_info_add
Miri Korenblit (1): wifi: iwlwifi: mvm: set gtk id also in older FWs
Myrrh Periwinkle (3):
      usb: typec: ucsi: Update power_supply on power role change
      vt: keyboard: Don't process Unicode characters in K_OFF mode
      vt: defkeymap: Map keycodes above 127 to K_HOLE
Namjae Jeon (1): ksmbd: extend the connection limiting mechanism to support IPv6
Naohiro Aota (3):
      btrfs: zoned: do not remove unwritten non-data block group
      btrfs: zoned: do not select metadata BG as finish target
      btrfs: zoned: fix write time activation failure for metadata block group
Nathan Chancellor (2):
      usb: atm: cxacru: Merge cxacru_upload_firmware() into cxacru_heavy_init()
      wifi: brcmsmac: Remove const from tbl_ptr parameter in wlc_lcnphy_common_read_table()
NeilBrown (1): smb/server: avoid deadlock when linking with ReplaceIfExists
Nicolas Dufresne (1): media: verisilicon: Fix AV1 decoder clock frequency
Nicolas Escande (1): neighbour: add support for NUD_PERMANENT proxy entries
Nicolin Chen (1): iommufd: Report unmapped bytes in the error path of iopt_unmap_iova_range
Niklas Söderlund (1): media: v4l2-common: Reduce warnings about missing V4L2_CID_LINK_FREQ control
Nitin Gote (1): iosys-map: Fix undefined behavior in iosys_map_clear()
Ojaswin Mujoo (2):
      ext4: fix fsmap end of range reporting with bigalloc
      ext4: fix reserved gdt blocks handling in fsmap
Oliver Neukum (3):
      usb: core: usb_submit_urb: downgrade type check
      net: usb: cdc-ncm: check for filtering capability
      cdc-acm: fix race between initial clearing halt and open
Oscar Maes (1): net: ipv4: fix incorrect MTU in broadcast routes
Pagadala Yesu Anjaneyulu (1): wifi: iwlwifi: fw: Fix possible memory leak in iwl_fw_dbg_collect
Pali Rohár (1): cifs: Fix calling CIFSFindFirst() for root path without msearch
Paul E. McKenney (1): rcu: Protect ->defer_qs_iw_pending from data race
Pauli Virtanen (1): Bluetooth: hci_event: fix MTU for BN == 0 in CIS Established
Pavel Begunkov (1): io_uring: don't use int for ABI
Pawan Gupta (1): x86/bugs: Avoid warning when overriding return thunk
Pedro Falcato (1): RDMA/siw: Fix the sendmsg byte count in siw_tcp_sendpages
Pei Xiao (1): clk: tegra: periph: Fix error handling and resolve unsigned compare warning
Peter Oberparleiter (3):
      s390/sclp: Fix SCCB present check
      s390/hypfs: Avoid unnecessary ioctl registration in debugfs
      s390/hypfs: Enable limited access during lockdown
Peter Robinson (1): reset: brcmstb: Enable reset drivers for ARCH_BCM2835
Peter Ujfalusi (1): ASoC: core: Check for rtd == NULL in snd_soc_remove_pcm_runtime()
Petr Pavlu (1): module: Prevent silent truncation of module name in delete_module(2)
Phillip Lougher (1): squashfs: fix memory leak in squashfs_fill_super
Prashant Malani (1): cpufreq: CPPC: Mark driver with NEED_UPDATE_LIMITS flag
Pu Lehui (1): tracing: Limit access to parser->buffer when trace_get_user failed
Purva Yeshi (1): md: dm-zoned-target: Initialize return variable r to avoid uninitialized use
Qingfang Deng (2):
      net: ethernet: mtk_ppe: add RCU lock around dev_fill_forward_path
      ppp: fix race conditions in ppp_fill_forward_path
Qu Wenruo (2):
      btrfs: do not allow relocation of partially dropped subvolumes
      btrfs: populate otime when logging an inode item
Rafael J. Wysocki (5):
      ACPI: processor: perflib: Move problematic pr->performance check
      cpuidle: governors: menu: Avoid using invalid recent intervals data
      PM: runtime: Clear power.needs_force_resume in pm_runtime_reinit()
      PM: runtime: Take active children into account in pm_runtime_get_if_in_use()
      cpuidle: governors: menu: Avoid selecting states with too much latency
Raj Kumar Bhagat (1): wifi: ath12k: Enable REO queue lookup table feature on QCN9274 hw2.0
Ramya Gnanasekar (1): wifi: mac80211: update radar_required in channel context after channel switch
Rand Deeb (1): wifi: iwlwifi: dvm: fix potential overflow in rs_fill_link_cmd()
Randy Dunlap (2):
      parisc: Makefile: fix a typo in palo.conf
      parisc: Makefile: explain that 64BIT requires both 32-bit and 64-bit compilers
Ranjan Kumar (4):
      scsi: Fix sas_user_scan() to handle wildcard and multi-channel scans
      scsi: mpi3mr: Fix race between config read submit and interrupt completion
      scsi: mpi3mr: Drop unnecessary volatile from __iomem pointers
      scsi: mpi3mr: Serialize admin queue BAR writes on 32-bit systems
Ricardo Ribalda (3):
      media: uvcvideo: Do not mark valid metadata as invalid
      media: venus: vdec: Clamp param smaller than 1fps and bigger than 240.
      media: venus: venc: Clamp param smaller than 1fps and bigger than 240
Richard Zhu (2):
      PCI: imx6: Delay link start until configfs 'start' written
      PCI: imx6: Add IMX8MM_EP and IMX8MP_EP fixed 256-byte BAR 4 in epc_features
Ricky Wu (1): misc: rtsx: usb: Ensure mmc child device is active when card is present
Rob Clark (1): drm/msm: use trylock for debugfs
Rong Zhang (1): fs/ntfs3: correctly create symlink for relative path
Sabrina Dubroca (1): udp: also consider secpath when evaluating ipsec use for checksumming
Sakari Ailus (2):
      media: v4l2-ctrls: Don't reset handler's error in v4l2_ctrl_handler_free()
      PM: runtime: Simplify pm_runtime_get_if_active() usage
Salah Triki (1): iio: pressure: bmp280: Use IS_ERR() in bmp280_common_probe()
Sarah Newman (1): drbd: add missing kref_get in handle_write_conflicts
Sarika Sharma (2):
      wifi: ath12k: Correct tid cleanup when tid setup fails
      wifi: ath12k: Add memset and update default rate value in wmi tx completion
Sarthak Garg (1): mmc: sdhci-msm: Ensure SD card power isn't ON when card removed
Sasha Levin (1): fs: Prevent file descriptor table allocations exceeding INT_MAX
Sean Christopherson (15):
      KVM: SVM: Set RFLAGS.IF=1 in C code, to get VMRUN out of the STI shadow
      KVM: x86: Plumb in the vCPU to kvm_x86_ops.hwapic_isr_update()
      KVM: x86: Snapshot the host's DEBUGCTL in common x86
      KVM: x86: Snapshot the host's DEBUGCTL after disabling IRQs
      KVM: x86: Plumb "force_immediate_exit" into kvm_entry() tracepoint
      KVM: VMX: Re-enter guest in fastpath for "spurious" preemption timer exits
      KVM: VMX: Handle forced exit due to preemption timer in fastpath
      KVM: x86: Move handling of is_guest_mode() into fastpath exit handlers
      KVM: VMX: Handle KVM-induced preemption timer exits in fastpath for L2
      KVM: x86: Fully defer to vendor code to decide how to force immediate exit
      KVM: x86: Convert vcpu_run()'s immediate exit param into a generic bitmap
      KVM: x86: Drop kvm_x86_ops.set_dr6() in favor of a new KVM_RUN flag
      KVM: VMX: Allow guest to set DEBUGCTL.RTM_DEBUG if RTM is supported
      KVM: VMX: Extract checking of guest's DEBUGCTL into helper
      KVM: x86: Take irqfds.lock when adding/deleting IRQ bypass producer
Sebastian Ott (1): ACPI: processor: fix acpi_object initialization
Sebastian Reichel (2):
      watchdog: dw_wdt: Fix default timeout
      usb: typec: fusb302: cache PD RX state
Selvarasu Ganesan (1): usb: dwc3: Remove WARN_ON for device endpoint command timeouts
Sergey Bashirov (4):
      pNFS: Fix stripe mapping in block/scsi layout
      pNFS: Fix disk addr range check in block/scsi layout
      pNFS: Handle RPC size limit for layoutcommits
      pNFS: Fix uninited ptr deref in block/scsi layout
Sergey Shtylyov (1): Bluetooth: hci_conn: do return error from hci_enhanced_setup_sync()
Shankari Anand (1): kconfig: nconf: Ensure null termination where strncpy is used
Shannon Nelson (1): ionic: clean dbpage in de-init
Shengjiu Wang (1): ASoC: fsl_sai: replace regmap_write with regmap_update_bits
Shiji Yang (2):
      MIPS: vpe-mt: add missing prototypes for vpe_{alloc,start,stop,free}
      MIPS: lantiq: falcon: sysctrl: fix request memory check logic
Showrya M N (1): scsi: libiscsi: Initialize iscsi_conn->dd_data only if memory is allocated
Shuai Xue (1): ACPI: APEI: send SIGBUS to current task if synchronous memory error not recovered
Shubhrajyoti Datta (1): EDAC/synopsys: Clear the ECC counters on init
Shyam Prasad N (1): cifs: reset iface weights when we cannot find a candidate
Sravan Kumar Gundu (1): fbdev: Fix vmalloc out-of-bounds write in fast_imageblit
Srinivas Kandagatla (1): ASoC: qcom: use drvdata instead of component to keep id
Stanislaw Gruszka (1): wifi: iwlegacy: Check rate_idx range after addition
Stefan Metzmacher (3):
      smb: client: let send_done() cleanup before calling smbd_disconnect_rdma_connection()
      smb: client: don't wait for info->send_pending == 0 on error
      smb: server: split ksmbd_rdma_stop_listening() out of ksmbd_rdma_destroy()
Stefan Wahren (1): spi: spi-fsl-lpspi: Clamp too high speed_hz
Steve French (1): smb3: fix for slab out of bounds on mount to ksmbd
Steven Rostedt (5):
      tracefs: Add d_delete to remove negative dentries
      powerpc/thp: tracing: Hide hugepage events under CONFIG_PPC_BOOK3S_64
      ktest.pl: Prevent recursion of default variable options
      ftrace: Also allocate and copy hash for reading of filter files
      tracing: Remove unneeded goto out logic
Su Hui (1): usb: xhci: print xhci->xhc_state when queue_command failed
Suchit Karunakaran (1): kconfig: lxdialog: replace strcpy() with strncpy() in inputbox.c
Sven Schnelle (2):
      s390/time: Use monotonic clock in get_cycles()
      s390/stp: Remove udelay from stp_sync_clock()
Takashi Iwai (5):
      ALSA: usb-audio: Validate UAC3 power domain descriptors, too
      ALSA: usb-audio: Validate UAC3 cluster segment descriptors
      ALSA: hda: Handle the jack polling always via a work
      ALSA: hda: Disable jack polling at shutdown
      ALSA: usb-audio: Use correct sub-type for UAC3 feature unit validation
Tetsuo Handa (1): hfsplus: don't use BUG_ON() in hfsplus_create_attributes_file()
Theodore Ts'o (2):
      ext4: do not BUG when INLINE_DATA_FL lacks system.data xattr
      ext4: don't try to clear the orphan_present feature block device is r/o
Thierry Reding (1): firmware: tegra: Fix IVC dependency problems
Thomas Fourier (8):
      et131x: Add missing check after DMA map
      net: ag71xx: Add missing check after DMA map
      (powerpc/512) Fix possible `dma_unmap_single()` on uninitialized pointer
      wifi: rtlwifi: fix possible skb memory leak in `_rtl_pci_rx_interrupt()`.
      powerpc: floppy: Add missing checks after DMA map
      wifi: rtlwifi: fix possible skb memory leak in _rtl_pci_init_one_rxdesc()
      mtd: rawnand: fsmc: Add missing check after DMA map
      mtd: rawnand: renesas: Add missing check after DMA map
Thomas Weißschuh (5):
      tools/nolibc: define time_t in terms of __kernel_old_time_t
      tools/build: Fix s390(x) cross-compilation with clang
      um: Re-evaluate thread flags repeatedly
      MIPS: Don't crash in stack_top() for tasks without ABI or vDSO
      kbuild: userprogs: use correct linker when mixing clang and GNU ld
Thorsten Blum (2):
      cdx: Fix off-by-one error in cdx_rpmsg_probe()
      usb: storage: realtek_cr: Use correct byte order for bcs->Residue
Tianxiang Peng (1): x86/cpu/hygon: Add missing resctrl_cpu_detect() in bsp_init helper
Tim Harvey (1): hwmon: (gsc-hwmon) fix fan pwm setpoint show functions
Timur Kristóf (6):
      drm/amd/display: Don't overwrite dce60_clk_mgr
      drm/amd/display: Fix fractional fb divider in set_pixel_clock_v3
      drm/amd/display: Fix DP audio DTO1 clock source on DCE 6.
      drm/amd/display: Find first CRTC and its line time in dce110_fill_display_configs
      drm/amd/display: Fill display clock and vblank time in dce110_fill_display_configs
      drm/amd/display: Don't overclock DCE 6 by 15%
Tomasz Michalec (2):
      usb: typec: intel_pmc_mux: Defer probe if SCU IPC isn't present
      platform/chrome: cros_ec_typec: Defer probe on missing EC parent
Trond Myklebust (1): NFS: Fix the setting of capabilities when automounting a new filesystem
Tvrtko Ursulin (1): drm/ttm: Respect the shrinker core free target
Tzung-Bi Shih (1): platform/chrome: cros_ec: Unregister notifier in cros_ec_unregister()
Ulf Hansson (1): mmc: rtsx_usb_sdmmc: Fix error-path in sd_set_power_mode()
Uwe Kleine-König (2):
      pwm: mediatek: Handle hardware enable and clock enable separately
      pwm: mediatek: Fix duty and period setting
ValdikSS (1): igc: fix disabling L1.2 PCI-E link substate on I226 on init
Vasiliy Kovalev (1): ALSA: hda/realtek: Fix headset mic on HONOR BRB-X
Vedang Nagar (2):
      media: venus: Fix OOB read due to missing payload bound check
      media: venus: Add a check for packet size after reading from shared memory
Viacheslav Dubeyko (5):
      hfs: fix general protection fault in hfs_find_init()
      hfs: fix slab-out-of-bounds in hfs_bnode_read()
      hfsplus: fix slab-out-of-bounds in hfsplus_bnode_read()
      hfsplus: fix slab-out-of-bounds read in hfsplus_uni2asc()
      hfs: fix not erasing deleted b-tree node issue
Victor Shih (3):
      mmc: sdhci-pci-gli: GL9763e: Rename the gli_set_gl9763e() for consistency
      mmc: sdhci-pci-gli: Add a new function to simplify the code
      mmc: sdhci-pci-gli: GL9763e: Mask the replay timer timeout of AER
Vijendar Mukunda (1): soundwire: amd: serialize amd manager resume sequence during pm_prepare
Vincent Guittot (8):
      sched/topology: Add a new arch_scale_freq_ref() method
      cpufreq: Use the fixed and coherent frequency for scaling capacity
      cpufreq/schedutil: Use a fixed reference frequency
      energy_model: Use a fixed reference frequency
      cpufreq/cppc: Set the frequency used for computing the capacity
      arm64/amu: Use capacity_ref_freq() to set AMU ratio
      topology: Set capacity_freq_ref in all cases
      sched/fair: Fix frequency selection for non-invariant case
Vladimir Zapolskiy (1): media: qcom: camss: cleanup media device allocated resource on error path
Waiman Long (2):
      mm/kmemleak: avoid soft lockup in __kmemleak_do_cleanup()
      cgroup/cpuset: Use static_branch_enable_cpuslocked() on cpusets_insane_config_key
Wang Liang (1): net: bridge: fix soft lockup in br_multicast_query_expired()
Wang Zhaolong (2):
      smb: client: remove redundant lstrp update in negotiate protocol
      smb: client: fix netns refcount leak after net_passive changes
Wayne Lin (1): drm/amd/display: Avoid trying AUX transactions on disconnected ports
Wei Gao (1): ext2: Handle fiemap on empty files to prevent EINVAL
Weitao Wang (1): usb: xhci: Fix slot_id resource race conflict
Wen Chen (1): drm/amd/display: Fix 'failed to blank crtc!'
Will Deacon (4):
      vsock/virtio: Resize receive buffers so that each SKB fits in a 4K page
      vsock/virtio: Validate length in packet header before skb_put()
      vhost/vsock: Avoid allocating arbitrarily-sized SKBs
      KVM: arm64: Fix kernel BUG() due to bad backport of FPSIMD/SVE/SME fix
William Liu (2):
      net/sched: Make cake_enqueue return NET_XMIT_CN when past buffer_limit
      net/sched: Remove unnecessary WARNING condition for empty child qdisc in htb_activate
Willy Tarreau (1): tools/nolibc: fix spelling of FD_SETBITMASK in FD_* macros
Wolfram Sang (3):
      media: usb: hdpvr: disable zero-length read messages
      i3c: add missing include to internal header
      i3c: don't fail if GETHDRCAP is unsupported
Xin Long (1): sctp: linearize cloned gso packets in sctp_rcv
Xinxin Wan (1): ASoC: codecs: rt5640: Retry DEVICE_ID verification
Xinyu Liu (1): usb: core: config: Prevent OOB read in SS endpoint companion parsing
Xu Yang (2):
      net: usb: asix_devices: add phy_mask for ax88772 mdio bus
      usb: core: hcd: fix accessing unmapped memory in SINGLE_STEP_SET_FEATURE test
Xu Yilun (1): fpga: zynq_fpga: Fix the wrong usage of dma_map_sgtable()
Yann E. MORIN (1): kconfig: lxdialog: fix 'space' to (de)select options
Ye Bin (1): fs/buffer: fix use-after-free when call bh_read() helper
Yonghong Song (1): selftests/bpf: Fix a user_ringbuf failure with arm64 64KB page size
Yongzhen Zhang (1): fbdev: fix potential buffer overflow in do_register_framebuffer()
Youngjun Lee (1): media: uvcvideo: Fix 1-byte out-of-bounds read in uvc_parse_format()
Youssef Samir (1): bus: mhi: host: Detect events pointing to unexpected TREs
Yuan Chen (1): bpftool: Fix JSON writer resource leak in version command
Yuezhang Mo (1): exfat: add cluster chain loop check for dir
Yuichiro Tsuji (1): net: usb: asix_devices: Fix PHY address mask in MDIO bus initialization
Yunhui Cui (1): serial: 8250: fix panic due to PSLVERR
Yury Norov [NVIDIA] (1): RDMA: hfi1: fix possible divide-by-zero in find_hw_thread_mask()
Zenm Chen (1): USB: storage: Ignore driver CD mode for Realtek multi-mode Wi-Fi dongles
Zhang Shurong (1): media: ov2659: Fix memory leaks in ov2659_probe()
Zhang Yi (1): ext4: fix hole length calculation overflow in non-extent inodes
Zhiqi Song (1): crypto: hisilicon/hpre - fix dma unmap sequence
Zhu Qiyu (1): ACPI: PRM: Reduce unnecessary printing to avoid user confusion
Zijun Hu (2):
      char: misc: Fix improper and inaccurate error code returned by misc_init()
      Bluetooth: hci_sock: Reset cookie to zero in hci_sock_free_cookie()
Ziyan Fu (1): watchdog: iTCO_wdt: Report error if timeout configuration fails
Ziyan Xu (1): ksmbd: fix refcount leak causing resource not released
chenchangcheng (1): media: uvcvideo: Fix bandwidth issue for Alcor camera
fangzhong.zhou (1): i2c: Force DLL0945 touchpad i2c freq to 100khz
jackysliu (1): scsi: bfa: Double-free fix
tuhaowen (1): PM: sleep: console: Fix the black screen issue
zhangjianrong (2):
      net: thunderbolt: Enable end-to-end flow control also in transmit
      net: thunderbolt: Fix the parameter passing of tb_xdomain_enable_paths()/tb_xdomain_disable_paths()
Álvaro Fernández Rojas (5):
      net: dsa: b53: fix b53_imp_vlan_setup for BCM5325
      net: dsa: b53: prevent GMII_PORT_OVERRIDE_CTRL access on BCM5325
      net: dsa: b53: prevent DIS_LEARNING access on BCM5325
      net: dsa: b53: prevent SWITCH_CTRL access on BCM5325
      net: dsa: b53: fix IP_MULTICAST_CTRL on BCM5325
diff --git a/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dpu.yaml b/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dpu.yaml index 4ebea60b8c5b..8c52fa0ea5f8 100644 --- a/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dpu.yaml +++ b/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dpu.yaml @@ -25,7 +25,7 @@ properties: maxItems: 1
clocks: - minItems: 2 + maxItems: 2
clock-names: items: diff --git a/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dsi-host.yaml b/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dsi-host.yaml index bc5594d18643..300bf2252c3e 100644 --- a/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dsi-host.yaml +++ b/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dsi-host.yaml @@ -20,7 +20,7 @@ properties: maxItems: 2
clocks: - minItems: 1 + maxItems: 1
clock-names: items: diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst index a624e92f2687..6ba11dfb4bf3 100644 --- a/Documentation/filesystems/fscrypt.rst +++ b/Documentation/filesystems/fscrypt.rst @@ -141,9 +141,8 @@ However, these ioctls have some limitations: CONFIG_PAGE_POISONING=y in your kernel config and add page_poison=1 to your kernel command line. However, this has a performance cost.
-- Secret keys might still exist in CPU registers, in crypto - accelerator hardware (if used by the crypto API to implement any of - the algorithms), or in other places not explicitly considered here. +- Secret keys might still exist in CPU registers or in other places + not explicitly considered here.
Limitations of v1 policies ~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -375,9 +374,12 @@ the work is done by XChaCha12, which is much faster than AES when AES acceleration is unavailable. For more information about Adiantum, see `the Adiantum paper https://eprint.iacr.org/2018/720.pdf`_.
-The (AES-128-CBC-ESSIV, AES-128-CTS-CBC) pair exists only to support -systems whose only form of AES acceleration is an off-CPU crypto -accelerator such as CAAM or CESA that does not support XTS. +The (AES-128-CBC-ESSIV, AES-128-CTS-CBC) pair was added to try to +provide a more efficient option for systems that lack AES instructions +in the CPU but do have a non-inline crypto engine such as CAAM or CESA +that supports AES-CBC (and not AES-XTS). This is deprecated. It has +been shown that just doing AES on the CPU is actually faster. +Moreover, Adiantum is faster still and is recommended on such systems.
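[Editor's aside, not part of the patch: as a rough userspace sketch of the recommendation above, a directory could be switched to Adiantum with the fscrypt v2 policy ioctl. Struct and constant names below come from the fscrypt UAPI header; the directory is assumed to be empty and the key already added via FS_IOC_ADD_ENCRYPTION_KEY.]

    /* Hypothetical illustration only; error handling trimmed. */
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/fscrypt.h>

    static int set_adiantum_policy(int dirfd,
                                   const __u8 key_id[FSCRYPT_KEY_IDENTIFIER_SIZE])
    {
            struct fscrypt_policy_v2 policy = {
                    .version = FSCRYPT_POLICY_V2,
                    .contents_encryption_mode = FSCRYPT_MODE_ADIANTUM,
                    .filenames_encryption_mode = FSCRYPT_MODE_ADIANTUM,
                    /* DIRECT_KEY is the usual pairing with Adiantum. */
                    .flags = FSCRYPT_POLICY_FLAGS_PAD_32 |
                             FSCRYPT_POLICY_FLAG_DIRECT_KEY,
            };

            memcpy(policy.master_key_identifier, key_id,
                   FSCRYPT_KEY_IDENTIFIER_SIZE);
            /* Returns 0 on success, -1 with errno set on failure. */
            return ioctl(dirfd, FS_IOC_SET_ENCRYPTION_POLICY, &policy);
    }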
The remaining mode pairs are the "national pride ciphers":
@@ -1231,22 +1233,13 @@ this by validating all top-level encryption policies prior to access. Inline encryption support =========================
-By default, fscrypt uses the kernel crypto API for all cryptographic -operations (other than HKDF, which fscrypt partially implements -itself). The kernel crypto API supports hardware crypto accelerators, -but only ones that work in the traditional way where all inputs and -outputs (e.g. plaintexts and ciphertexts) are in memory. fscrypt can -take advantage of such hardware, but the traditional acceleration -model isn't particularly efficient and fscrypt hasn't been optimized -for it. - -Instead, many newer systems (especially mobile SoCs) have *inline -encryption hardware* that can encrypt/decrypt data while it is on its -way to/from the storage device. Linux supports inline encryption -through a set of extensions to the block layer called *blk-crypto*. -blk-crypto allows filesystems to attach encryption contexts to bios -(I/O requests) to specify how the data will be encrypted or decrypted -in-line. For more information about blk-crypto, see +Many newer systems (especially mobile SoCs) have *inline encryption +hardware* that can encrypt/decrypt data while it is on its way to/from +the storage device. Linux supports inline encryption through a set of +extensions to the block layer called *blk-crypto*. blk-crypto allows +filesystems to attach encryption contexts to bios (I/O requests) to +specify how the data will be encrypted or decrypted in-line. For more +information about blk-crypto, see :ref:`Documentation/block/inline-encryption.rst <inline_encryption>`.
On supported filesystems (currently ext4 and f2fs), fscrypt can use diff --git a/Documentation/firmware-guide/acpi/i2c-muxes.rst b/Documentation/firmware-guide/acpi/i2c-muxes.rst index 3a8997ccd7c4..f366539acd79 100644 --- a/Documentation/firmware-guide/acpi/i2c-muxes.rst +++ b/Documentation/firmware-guide/acpi/i2c-muxes.rst @@ -14,7 +14,7 @@ Consider this topology:: | | | 0x70 |--CH01--> i2c client B (0x50) +------+ +------+
-which corresponds to the following ASL:: +which corresponds to the following ASL (in the scope of _SB)::
Device (SMB1) { @@ -24,7 +24,7 @@ which corresponds to the following ASL:: Name (_HID, ...) Name (_CRS, ResourceTemplate () { I2cSerialBus (0x70, ControllerInitiated, I2C_SPEED, - AddressingMode7Bit, "^SMB1", 0x00, + AddressingMode7Bit, "\_SB.SMB1", 0x00, ResourceConsumer,,) }
@@ -37,7 +37,7 @@ which corresponds to the following ASL:: Name (_HID, ...) Name (_CRS, ResourceTemplate () { I2cSerialBus (0x50, ControllerInitiated, I2C_SPEED, - AddressingMode7Bit, "^CH00", 0x00, + AddressingMode7Bit, "\_SB.SMB1.CH00", 0x00, ResourceConsumer,,) } } @@ -52,7 +52,7 @@ which corresponds to the following ASL:: Name (_HID, ...) Name (_CRS, ResourceTemplate () { I2cSerialBus (0x50, ControllerInitiated, I2C_SPEED, - AddressingMode7Bit, "^CH01", 0x00, + AddressingMode7Bit, "\_SB.SMB1.CH01", 0x00, ResourceConsumer,,) } } diff --git a/Documentation/networking/bonding.rst b/Documentation/networking/bonding.rst index f7a73421eb76..e774b48de9f5 100644 --- a/Documentation/networking/bonding.rst +++ b/Documentation/networking/bonding.rst @@ -444,6 +444,18 @@ arp_missed_max
The default value is 2, and the allowable range is 1 - 255.
+coupled_control + + Specifies whether the LACP state machine's MUX in the 802.3ad mode + should have separate Collecting and Distributing states. + + This is by implementing the independent control state machine per + IEEE 802.1AX-2008 5.4.15 in addition to the existing coupled control + state machine. + + The default value is 1. This setting does not separate the Collecting + and Distributing states, maintaining the bond in coupled control. + downdelay
Specifies the time, in milliseconds, to wait before disabling diff --git a/Documentation/networking/mptcp-sysctl.rst b/Documentation/networking/mptcp-sysctl.rst index 15f1919d640c..b797f3dd4b69 100644 --- a/Documentation/networking/mptcp-sysctl.rst +++ b/Documentation/networking/mptcp-sysctl.rst @@ -20,6 +20,8 @@ add_addr_timeout - INTEGER (seconds) resent to an MPTCP peer that has not acknowledged a previous ADD_ADDR message.
+ Do not retransmit if set to 0. + The default value matches TCP_RTO_MAX. This is a per-namespace sysctl.
diff --git a/Documentation/power/runtime_pm.rst b/Documentation/power/runtime_pm.rst index b6d5a3a8febc..f6a7cffdc129 100644 --- a/Documentation/power/runtime_pm.rst +++ b/Documentation/power/runtime_pm.rst @@ -398,10 +398,9 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h: nonzero, increment the counter and return 1; otherwise return 0 without changing the counter
- `int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count);` + `int pm_runtime_get_if_active(struct device *dev);` - return -EINVAL if 'power.disable_depth' is nonzero; otherwise, if the - runtime PM status is RPM_ACTIVE, and either ign_usage_count is true - or the device's usage_count is non-zero, increment the counter and + runtime PM status is RPM_ACTIVE, increment the counter and return 1; otherwise return 0 without changing the counter
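[Editor's aside, not part of the patch: a hypothetical driver fragment showing how the updated single-argument helper is typically used, so hardware is only touched while the device is known to be powered.]

    #include <linux/pm_runtime.h>

    static void example_poll_registers(struct device *dev)
    {
            /*
             * Returns 1 and increments the usage counter only while the
             * runtime PM status is RPM_ACTIVE; returns 0 if the device is
             * suspended and -EINVAL if runtime PM is disabled for it.
             */
            if (pm_runtime_get_if_active(dev) <= 0)
                    return;

            /* ... safe to access device registers here ... */

            pm_runtime_put(dev);
    }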
`void pm_runtime_put_noidle(struct device *dev);` diff --git a/Makefile b/Makefile index 685a65992449..9b288ccccd64 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 6 PATCHLEVEL = 6 -SUBLEVEL = 102 +SUBLEVEL = 103 EXTRAVERSION = NAME = Pinguïn Aangedreven
@@ -1061,7 +1061,7 @@ KBUILD_USERCFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))
# userspace programs are linked via the compiler, use the correct linker -ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_LD_IS_LLD),yy) +ifdef CONFIG_CC_IS_CLANG KBUILD_USERLDFLAGS += $(call cc-option, --ld-path=$(LD)) endif
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h index c7d2510e5a78..853c4f81ba4a 100644 --- a/arch/arm/include/asm/topology.h +++ b/arch/arm/include/asm/topology.h @@ -13,6 +13,7 @@ #define arch_set_freq_scale topology_set_freq_scale #define arch_scale_freq_capacity topology_get_freq_scale #define arch_scale_freq_invariant topology_scale_freq_invariant +#define arch_scale_freq_ref topology_get_freq_ref #endif
/* Replace task scheduler's default cpu-invariant accounting */ diff --git a/arch/arm/mach-rockchip/platsmp.c b/arch/arm/mach-rockchip/platsmp.c index 36915a073c23..f432d22bfed8 100644 --- a/arch/arm/mach-rockchip/platsmp.c +++ b/arch/arm/mach-rockchip/platsmp.c @@ -279,11 +279,6 @@ static void __init rockchip_smp_prepare_cpus(unsigned int max_cpus) }
if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) { - if (rockchip_smp_prepare_sram(node)) { - of_node_put(node); - return; - } - /* enable the SCU power domain */ pmu_set_power_domain(PMU_PWRDN_SCU, true);
@@ -316,11 +311,19 @@ static void __init rockchip_smp_prepare_cpus(unsigned int max_cpus) asm ("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr)); ncores = ((l2ctlr >> 24) & 0x3) + 1; } - of_node_put(node);
/* Make sure that all cores except the first are really off */ for (i = 1; i < ncores; i++) pmu_set_power_domain(0 + i, false); + + if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) { + if (rockchip_smp_prepare_sram(node)) { + of_node_put(node); + return; + } + } + + of_node_put(node); }
static void __init rk3036_smp_prepare_cpus(unsigned int max_cpus) diff --git a/arch/arm/mach-tegra/reset.c b/arch/arm/mach-tegra/reset.c index d5c805adf7a8..ea706fac6358 100644 --- a/arch/arm/mach-tegra/reset.c +++ b/arch/arm/mach-tegra/reset.c @@ -63,7 +63,7 @@ static void __init tegra_cpu_reset_handler_enable(void) BUG_ON(is_enabled); BUG_ON(tegra_cpu_reset_handler_size > TEGRA_IRAM_RESET_HANDLER_SIZE);
- memcpy(iram_base, (void *)__tegra_cpu_reset_handler_start, + memcpy_toio(iram_base, (void *)__tegra_cpu_reset_handler_start, tegra_cpu_reset_handler_size);
err = call_firmware_op(set_cpu_boot_addr, 0, reset_address); diff --git a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi index f156167b4e8a..3f74767d63ab 100644 --- a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi @@ -531,7 +531,6 @@ sdhci0: mmc@fa10000 { clock-names = "clk_ahb", "clk_xin"; assigned-clocks = <&k3_clks 57 6>; assigned-clock-parents = <&k3_clks 57 8>; - mmc-ddr-1_8v; mmc-hs200-1_8v; ti,trm-icp = <0x2>; bus-width = <8>; diff --git a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi index e931c966b7f2..e98d043e5746 100644 --- a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi @@ -448,16 +448,16 @@ AM62X_IOPAD(0x01ec, PIN_INPUT_PULLUP, 0) /* (A17) I2C1_SDA */ /* SODIMM 12 */ /* Verdin I2C_2_DSI */ pinctrl_i2c2: main-i2c2-default-pins { pinctrl-single,pins = < - AM62X_IOPAD(0x00b0, PIN_INPUT, 1) /* (K22) GPMC0_CSn2.I2C2_SCL */ /* SODIMM 55 */ - AM62X_IOPAD(0x00b4, PIN_INPUT, 1) /* (K24) GPMC0_CSn3.I2C2_SDA */ /* SODIMM 53 */ + AM62X_IOPAD(0x00b0, PIN_INPUT_PULLUP, 1) /* (K22) GPMC0_CSn2.I2C2_SCL */ /* SODIMM 55 */ + AM62X_IOPAD(0x00b4, PIN_INPUT_PULLUP, 1) /* (K24) GPMC0_CSn3.I2C2_SDA */ /* SODIMM 53 */ >; };
/* Verdin I2C_4_CSI */ pinctrl_i2c3: main-i2c3-default-pins { pinctrl-single,pins = < - AM62X_IOPAD(0x01d0, PIN_INPUT, 2) /* (A15) UART0_CTSn.I2C3_SCL */ /* SODIMM 95 */ - AM62X_IOPAD(0x01d4, PIN_INPUT, 2) /* (B15) UART0_RTSn.I2C3_SDA */ /* SODIMM 93 */ + AM62X_IOPAD(0x01d0, PIN_INPUT_PULLUP, 2) /* (A15) UART0_CTSn.I2C3_SCL */ /* SODIMM 95 */ + AM62X_IOPAD(0x01d4, PIN_INPUT_PULLUP, 2) /* (B15) UART0_RTSn.I2C3_SDA */ /* SODIMM 93 */ >; };
@@ -729,8 +729,8 @@ AM62X_MCU_IOPAD(0x0010, PIN_INPUT, 7) /* (C9) MCU_SPI0_D1.MCU_GPIO0_4 */ /* SODI /* Verdin I2C_3_HDMI */ pinctrl_mcu_i2c0: mcu-i2c0-default-pins { pinctrl-single,pins = < - AM62X_MCU_IOPAD(0x0044, PIN_INPUT, 0) /* (A8) MCU_I2C0_SCL */ /* SODIMM 59 */ - AM62X_MCU_IOPAD(0x0048, PIN_INPUT, 0) /* (D10) MCU_I2C0_SDA */ /* SODIMM 57 */ + AM62X_MCU_IOPAD(0x0044, PIN_INPUT_PULLUP, 0) /* (A8) MCU_I2C0_SCL */ /* SODIMM 59 */ + AM62X_MCU_IOPAD(0x0048, PIN_INPUT_PULLUP, 0) /* (D10) MCU_I2C0_SDA */ /* SODIMM 57 */ >; };
diff --git a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts index 99f2878de4c6..12cbf253a891 100644 --- a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts +++ b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts @@ -144,8 +144,8 @@ AM62AX_IOPAD(0x1cc, PIN_OUTPUT, 0) /* (D15) UART0_TXD */
main_uart1_pins_default: main-uart1-default-pins { pinctrl-single,pins = < - AM62AX_IOPAD(0x01e8, PIN_INPUT, 1) /* (C17) I2C1_SCL.UART1_RXD */ - AM62AX_IOPAD(0x01ec, PIN_OUTPUT, 1) /* (E17) I2C1_SDA.UART1_TXD */ + AM62AX_IOPAD(0x01ac, PIN_INPUT, 2) /* (B21) MCASP0_AFSR.UART1_RXD */ + AM62AX_IOPAD(0x01b0, PIN_OUTPUT, 2) /* (A21) MCASP0_ACLKR.UART1_TXD */ AM62AX_IOPAD(0x0194, PIN_INPUT, 2) /* (C19) MCASP0_AXR3.UART1_CTSn */ AM62AX_IOPAD(0x0198, PIN_OUTPUT, 2) /* (B19) MCASP0_AXR2.UART1_RTSn */ >; diff --git a/arch/arm64/boot/dts/ti/k3-pinctrl.h b/arch/arm64/boot/dts/ti/k3-pinctrl.h index 2a4e0e084d69..8e0869d9b7a0 100644 --- a/arch/arm64/boot/dts/ti/k3-pinctrl.h +++ b/arch/arm64/boot/dts/ti/k3-pinctrl.h @@ -8,11 +8,16 @@ #ifndef DTS_ARM64_TI_K3_PINCTRL_H #define DTS_ARM64_TI_K3_PINCTRL_H
+#define ST_EN_SHIFT (14) #define PULLUDEN_SHIFT (16) #define PULLTYPESEL_SHIFT (17) #define RXACTIVE_SHIFT (18) #define DEBOUNCE_SHIFT (11)
+/* Schmitt trigger configuration */ +#define ST_DISABLE (0 << ST_EN_SHIFT) +#define ST_ENABLE (1 << ST_EN_SHIFT) + #define PULL_DISABLE (1 << PULLUDEN_SHIFT) #define PULL_ENABLE (0 << PULLUDEN_SHIFT)
@@ -26,9 +31,13 @@ #define PIN_OUTPUT (INPUT_DISABLE | PULL_DISABLE) #define PIN_OUTPUT_PULLUP (INPUT_DISABLE | PULL_UP) #define PIN_OUTPUT_PULLDOWN (INPUT_DISABLE | PULL_DOWN) -#define PIN_INPUT (INPUT_EN | PULL_DISABLE) -#define PIN_INPUT_PULLUP (INPUT_EN | PULL_UP) -#define PIN_INPUT_PULLDOWN (INPUT_EN | PULL_DOWN) +#define PIN_INPUT (INPUT_EN | ST_ENABLE | PULL_DISABLE) +#define PIN_INPUT_PULLUP (INPUT_EN | ST_ENABLE | PULL_UP) +#define PIN_INPUT_PULLDOWN (INPUT_EN | ST_ENABLE | PULL_DOWN) +/* Input configurations with Schmitt Trigger disabled */ +#define PIN_INPUT_NOST (INPUT_EN | PULL_DISABLE) +#define PIN_INPUT_PULLUP_NOST (INPUT_EN | PULL_UP) +#define PIN_INPUT_PULLDOWN_NOST (INPUT_EN | PULL_DOWN)
#define PIN_DEBOUNCE_DISABLE (0 << DEBOUNCE_SHIFT) #define PIN_DEBOUNCE_CONF1 (1 << DEBOUNCE_SHIFT) diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h index a407f9cd549e..c07a58b96329 100644 --- a/arch/arm64/include/asm/acpi.h +++ b/arch/arm64/include/asm/acpi.h @@ -150,7 +150,7 @@ acpi_set_mailbox_entry(int cpu, struct acpi_madt_generic_interrupt *processor) {} #endif
-static inline const char *acpi_get_enable_method(int cpu) +static __always_inline const char *acpi_get_enable_method(int cpu) { if (acpi_psci_present()) return "psci"; diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h index 9fab663dd2de..a323b109b9c4 100644 --- a/arch/arm64/include/asm/topology.h +++ b/arch/arm64/include/asm/topology.h @@ -23,6 +23,7 @@ void update_freq_counters_refs(void); #define arch_set_freq_scale topology_set_freq_scale #define arch_scale_freq_capacity topology_get_freq_scale #define arch_scale_freq_invariant topology_scale_freq_invariant +#define arch_scale_freq_ref topology_get_freq_ref
#ifdef CONFIG_ACPI_CPPC_LIB #define arch_init_invariance_cppc topology_init_cpu_capacity_cppc diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index a1e0cc5353fb..d0d836448a76 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -1876,10 +1876,10 @@ void fpsimd_save_and_flush_cpu_state(void) if (!system_supports_fpsimd()) return; WARN_ON(preemptible()); - __get_cpu_fpsimd_context(); + get_cpu_fpsimd_context(); fpsimd_save(); fpsimd_flush_cpu_state(); - __put_cpu_fpsimd_context(); + put_cpu_fpsimd_context(); }
#ifdef CONFIG_KERNEL_MODE_NEON diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 817d788cd866..1a2c72f3e7f8 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -82,7 +82,12 @@ int __init parse_acpi_topology(void) #undef pr_fmt #define pr_fmt(fmt) "AMU: " fmt
-static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale); +/* + * Ensure that amu_scale_freq_tick() will return SCHED_CAPACITY_SCALE until + * the CPU capacity and its associated frequency have been correctly + * initialized. + */ +static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale) = 1UL << (2 * SCHED_CAPACITY_SHIFT); static DEFINE_PER_CPU(u64, arch_const_cycles_prev); static DEFINE_PER_CPU(u64, arch_core_cycles_prev); static cpumask_var_t amu_fie_cpus; @@ -112,14 +117,14 @@ static inline bool freq_counters_valid(int cpu) return true; }
-static int freq_inv_set_max_ratio(int cpu, u64 max_rate, u64 ref_rate) +void freq_inv_set_max_ratio(int cpu, u64 max_rate) { - u64 ratio; + u64 ratio, ref_rate = arch_timer_get_rate();
if (unlikely(!max_rate || !ref_rate)) { - pr_debug("CPU%d: invalid maximum or reference frequency.\n", + WARN_ONCE(1, "CPU%d: invalid maximum or reference frequency.\n", cpu); - return -EINVAL; + return; }
/* @@ -139,12 +144,10 @@ static int freq_inv_set_max_ratio(int cpu, u64 max_rate, u64 ref_rate) ratio = div64_u64(ratio, max_rate); if (!ratio) { WARN_ONCE(1, "Reference frequency too low.\n"); - return -EINVAL; + return; }
- per_cpu(arch_max_freq_scale, cpu) = (unsigned long)ratio; - - return 0; + WRITE_ONCE(per_cpu(arch_max_freq_scale, cpu), (unsigned long)ratio); }
static void amu_scale_freq_tick(void) @@ -195,10 +198,7 @@ static void amu_fie_setup(const struct cpumask *cpus) return;
for_each_cpu(cpu, cpus) { - if (!freq_counters_valid(cpu) || - freq_inv_set_max_ratio(cpu, - cpufreq_get_hw_max_freq(cpu) * 1000ULL, - arch_timer_get_rate())) + if (!freq_counters_valid(cpu)) return; }
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 8b70759cdbb9..610f8a1099f5 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -953,6 +953,7 @@ void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigne
void __noreturn arm64_serror_panic(struct pt_regs *regs, unsigned long esr) { + add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK); console_verbose();
pr_crit("SError Interrupt on CPU%d, code 0x%016lx -- %s\n", diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 2e5d1e238af9..893b9485b840 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -753,6 +753,7 @@ static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs) */ siaddr = untagged_addr(far); } + add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK); arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr);
return 0; diff --git a/arch/arm64/mm/ptdump_debugfs.c b/arch/arm64/mm/ptdump_debugfs.c index 68bf1a125502..1e308328c079 100644 --- a/arch/arm64/mm/ptdump_debugfs.c +++ b/arch/arm64/mm/ptdump_debugfs.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/debugfs.h> -#include <linux/memory_hotplug.h> #include <linux/seq_file.h>
#include <asm/ptdump.h> @@ -9,9 +8,7 @@ static int ptdump_show(struct seq_file *m, void *v) { struct ptdump_info *info = m->private;
- get_online_mems(); ptdump_walk(m, info); - put_online_mems(); return 0; } DEFINE_SHOW_ATTRIBUTE(ptdump); diff --git a/arch/loongarch/kernel/module-sections.c b/arch/loongarch/kernel/module-sections.c index e2f30ff9afde..a43ba7f9f987 100644 --- a/arch/loongarch/kernel/module-sections.c +++ b/arch/loongarch/kernel/module-sections.c @@ -8,6 +8,7 @@ #include <linux/module.h> #include <linux/moduleloader.h> #include <linux/ftrace.h> +#include <linux/sort.h>
Elf_Addr module_emit_got_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val) { @@ -61,39 +62,38 @@ Elf_Addr module_emit_plt_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr v return (Elf_Addr)&plt[nr]; }
-static int is_rela_equal(const Elf_Rela *x, const Elf_Rela *y) -{ - return x->r_info == y->r_info && x->r_addend == y->r_addend; -} +#define cmp_3way(a, b) ((a) < (b) ? -1 : (a) > (b))
-static bool duplicate_rela(const Elf_Rela *rela, int idx) +static int compare_rela(const void *x, const void *y) { - int i; + int ret; + const Elf_Rela *rela_x = x, *rela_y = y;
- for (i = 0; i < idx; i++) { - if (is_rela_equal(&rela[i], &rela[idx])) - return true; - } + ret = cmp_3way(rela_x->r_info, rela_y->r_info); + if (ret == 0) + ret = cmp_3way(rela_x->r_addend, rela_y->r_addend);
- return false; + return ret; }
static void count_max_entries(Elf_Rela *relas, int num, unsigned int *plts, unsigned int *gots) { - unsigned int i, type; + unsigned int i; + + sort(relas, num, sizeof(Elf_Rela), compare_rela, NULL);
for (i = 0; i < num; i++) { - type = ELF_R_TYPE(relas[i].r_info); - switch (type) { + if (i && !compare_rela(&relas[i-1], &relas[i])) + continue; + + switch (ELF_R_TYPE(relas[i].r_info)) { case R_LARCH_SOP_PUSH_PLT_PCREL: case R_LARCH_B26: - if (!duplicate_rela(relas, i)) - (*plts)++; + (*plts)++; break; case R_LARCH_GOT_PC_HI20: - if (!duplicate_rela(relas, i)) - (*gots)++; + (*gots)++; break; default: break; /* Do nothing. */ diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c index dcb1428b458c..869003f1c703 100644 --- a/arch/loongarch/net/bpf_jit.c +++ b/arch/loongarch/net/bpf_jit.c @@ -203,11 +203,9 @@ bool bpf_jit_supports_kfunc_call(void) return true; }
-/* initialized on the first pass of build_body() */ -static int out_offset = -1; -static int emit_bpf_tail_call(struct jit_ctx *ctx) +static int emit_bpf_tail_call(struct jit_ctx *ctx, int insn) { - int off; + int off, tc_ninsn = 0; u8 tcc = tail_call_reg(ctx); u8 a1 = LOONGARCH_GPR_A1; u8 a2 = LOONGARCH_GPR_A2; @@ -217,7 +215,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) const int idx0 = ctx->idx;
#define cur_offset (ctx->idx - idx0) -#define jmp_offset (out_offset - (cur_offset)) +#define jmp_offset (tc_ninsn - (cur_offset))
/* * a0: &ctx @@ -227,6 +225,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) * if (index >= array->map.max_entries) * goto out; */ + tc_ninsn = insn ? ctx->offset[insn+1] - ctx->offset[insn] : ctx->offset[0]; off = offsetof(struct bpf_array, map.max_entries); emit_insn(ctx, ldwu, t1, a1, off); /* bgeu $a2, $t1, jmp_offset */ @@ -258,15 +257,6 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) emit_insn(ctx, ldd, t3, t2, off); __build_epilogue(ctx, true);
- /* out: */ - if (out_offset == -1) - out_offset = cur_offset; - if (cur_offset != out_offset) { - pr_err_once("tail_call out_offset = %d, expected %d!\n", - cur_offset, out_offset); - return -1; - } - return 0;
toofar: @@ -853,7 +843,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext /* tail call */ case BPF_JMP | BPF_TAIL_CALL: mark_tail_call(ctx); - if (emit_bpf_tail_call(ctx) < 0) + if (emit_bpf_tail_call(ctx, i) < 0) return -EINVAL; break;
@@ -1251,7 +1241,6 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) if (tmp_blinded) bpf_jit_prog_release_other(prog, prog == orig_prog ? tmp : orig_prog);
- out_offset = -1;
return prog; } diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S index 397114962a14..10f6aa4d8f05 100644 --- a/arch/m68k/kernel/head.S +++ b/arch/m68k/kernel/head.S @@ -3404,6 +3404,7 @@ L(console_clear_loop):
movel %d4,%d1 /* screen height in pixels */ divul %a0@(FONT_DESC_HEIGHT),%d1 /* d1 = max num rows */ + subql #1,%d1 /* row range is 0 to num - 1 */
movel %d0,%a2@(Lconsole_struct_num_columns) movel %d1,%a2@(Lconsole_struct_num_rows) @@ -3550,15 +3551,14 @@ func_start console_putc,%a0/%a1/%d0-%d7 cmpib #10,%d7 jne L(console_not_lf) movel %a0@(Lconsole_struct_cur_row),%d0 - addil #1,%d0 - movel %d0,%a0@(Lconsole_struct_cur_row) movel %a0@(Lconsole_struct_num_rows),%d1 cmpl %d1,%d0 jcs 1f - subil #1,%d0 - movel %d0,%a0@(Lconsole_struct_cur_row) console_scroll + jra L(console_exit) 1: + addql #1,%d0 + movel %d0,%a0@(Lconsole_struct_cur_row) jra L(console_exit)
L(console_not_lf): @@ -3585,12 +3585,6 @@ L(console_not_cr): */ L(console_not_home): movel %a0@(Lconsole_struct_cur_column),%d0 - addql #1,%a0@(Lconsole_struct_cur_column) - movel %a0@(Lconsole_struct_num_columns),%d1 - cmpl %d1,%d0 - jcs 1f - console_putc #'\n' /* recursion is OK! */ -1: movel %a0@(Lconsole_struct_cur_row),%d1
/* @@ -3637,6 +3631,23 @@ L(console_do_font_scanline): addq #1,%d1 dbra %d7,L(console_read_char_scanline)
+ /* + * Register usage in the code below: + * a0 = pointer to console globals + * d0 = cursor column + * d1 = cursor column limit + */ + + lea %pc@(L(console_globals)),%a0 + + movel %a0@(Lconsole_struct_cur_column),%d0 + addql #1,%d0 + movel %d0,%a0@(Lconsole_struct_cur_column) /* Update cursor pos */ + movel %a0@(Lconsole_struct_num_columns),%d1 + cmpl %d1,%d0 + jcs L(console_exit) + console_putc #'\n' /* Line wrap using tail recursion */ + L(console_exit): func_return console_putc
diff --git a/arch/mips/crypto/chacha-core.S b/arch/mips/crypto/chacha-core.S index 5755f69cfe00..706aeb850fb0 100644 --- a/arch/mips/crypto/chacha-core.S +++ b/arch/mips/crypto/chacha-core.S @@ -55,17 +55,13 @@ #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ #define MSB 0 #define LSB 3 -#define ROTx rotl -#define ROTR(n) rotr n, 24 #define CPU_TO_LE32(n) \ - wsbh n; \ + wsbh n, n; \ rotr n, 16; #else #define MSB 3 #define LSB 0 -#define ROTx rotr #define CPU_TO_LE32(n) -#define ROTR(n) #endif
#define FOR_EACH_WORD(x) \ @@ -192,10 +188,10 @@ CONCAT3(.Lchacha_mips_xor_aligned_, PLUS_ONE(x), _b: ;) \ xor X(W), X(B); \ xor X(Y), X(C); \ xor X(Z), X(D); \ - rotl X(V), S; \ - rotl X(W), S; \ - rotl X(Y), S; \ - rotl X(Z), S; + rotr X(V), 32 - S; \ + rotr X(W), 32 - S; \ + rotr X(Y), 32 - S; \ + rotr X(Z), 32 - S;
.text .set reorder @@ -372,21 +368,19 @@ chacha_crypt_arch: /* First byte */ lbu T1, 0(IN) addiu $at, BYTES, 1 - CPU_TO_LE32(SAVED_X) - ROTR(SAVED_X) xor T1, SAVED_X sb T1, 0(OUT) beqz $at, .Lchacha_mips_xor_done /* Second byte */ lbu T1, 1(IN) addiu $at, BYTES, 2 - ROTx SAVED_X, 8 + rotr SAVED_X, 8 xor T1, SAVED_X sb T1, 1(OUT) beqz $at, .Lchacha_mips_xor_done /* Third byte */ lbu T1, 2(IN) - ROTx SAVED_X, 8 + rotr SAVED_X, 8 xor T1, SAVED_X sb T1, 2(OUT) b .Lchacha_mips_xor_done diff --git a/arch/mips/include/asm/vpe.h b/arch/mips/include/asm/vpe.h index 61fd4d0aeda4..c0769dc4b853 100644 --- a/arch/mips/include/asm/vpe.h +++ b/arch/mips/include/asm/vpe.h @@ -119,4 +119,12 @@ void cleanup_tc(struct tc *tc);
int __init vpe_module_init(void); void __exit vpe_module_exit(void); + +#ifdef CONFIG_MIPS_VPE_LOADER_MT +void *vpe_alloc(void); +int vpe_start(void *vpe, unsigned long start); +int vpe_stop(void *vpe); +int vpe_free(void *vpe); +#endif /* CONFIG_MIPS_VPE_LOADER_MT */ + #endif /* _ASM_VPE_H */ diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index b630604c577f..02aa6a04a21d 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -690,18 +690,20 @@ unsigned long mips_stack_top(void) }
/* Space for the VDSO, data page & GIC user page */ - top -= PAGE_ALIGN(current->thread.abi->vdso->size); - top -= PAGE_SIZE; - top -= mips_gic_present() ? PAGE_SIZE : 0; + if (current->thread.abi) { + top -= PAGE_ALIGN(current->thread.abi->vdso->size); + top -= PAGE_SIZE; + top -= mips_gic_present() ? PAGE_SIZE : 0; + + /* Space to randomize the VDSO base */ + if (current->flags & PF_RANDOMIZE) + top -= VDSO_RANDOMIZE_SIZE; + }
/* Space for cache colour alignment */ if (cpu_has_dc_aliases) top -= shm_align_mask + 1;
- /* Space to randomize the VDSO base */ - if (current->flags & PF_RANDOMIZE) - top -= VDSO_RANDOMIZE_SIZE; - return top; }
diff --git a/arch/mips/lantiq/falcon/sysctrl.c b/arch/mips/lantiq/falcon/sysctrl.c index 1187729d8cbb..357543996ee6 100644 --- a/arch/mips/lantiq/falcon/sysctrl.c +++ b/arch/mips/lantiq/falcon/sysctrl.c @@ -214,19 +214,16 @@ void __init ltq_soc_init(void) of_node_put(np_syseth); of_node_put(np_sysgpe);
- if ((request_mem_region(res_status.start, resource_size(&res_status), - res_status.name) < 0) || - (request_mem_region(res_ebu.start, resource_size(&res_ebu), - res_ebu.name) < 0) || - (request_mem_region(res_sys[0].start, - resource_size(&res_sys[0]), - res_sys[0].name) < 0) || - (request_mem_region(res_sys[1].start, - resource_size(&res_sys[1]), - res_sys[1].name) < 0) || - (request_mem_region(res_sys[2].start, - resource_size(&res_sys[2]), - res_sys[2].name) < 0)) + if ((!request_mem_region(res_status.start, resource_size(&res_status), + res_status.name)) || + (!request_mem_region(res_ebu.start, resource_size(&res_ebu), + res_ebu.name)) || + (!request_mem_region(res_sys[0].start, resource_size(&res_sys[0]), + res_sys[0].name)) || + (!request_mem_region(res_sys[1].start, resource_size(&res_sys[1]), + res_sys[1].name)) || + (!request_mem_region(res_sys[2].start, resource_size(&res_sys[2]), + res_sys[2].name))) pr_err("Failed to request core resources");
status_membase = ioremap(res_status.start, diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 920db57b6b4c..5f0a1f1b00a7 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -39,7 +39,9 @@ endif
export LD_BFD
-# Set default 32 bits cross compilers for vdso +# Set default 32 bits cross compilers for vdso. +# This means that for 64BIT, both the 64-bit tools and the 32-bit tools +# need to be in the path. CC_ARCHES_32 = hppa hppa2.0 hppa1.1 CC_SUFFIXES = linux linux-gnu unknown-linux-gnu suse-linux CROSS32_COMPILE := $(call cc-cross-prefix, \ @@ -139,7 +141,7 @@ palo lifimage: vmlinuz fi @if test ! -f "$(PALOCONF)"; then \ cp $(srctree)/arch/parisc/defpalo.conf $(objtree)/palo.conf; \ - echo 'A generic palo config file ($(objree)/palo.conf) has been created for you.'; \ + echo 'A generic palo config file ($(objtree)/palo.conf) has been created for you.'; \ echo 'You should check it and re-run "make palo".'; \ echo 'WARNING: the "lifimage" file is now placed in this directory by default!'; \ false; \ diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index babf65751e81..3446a5e2520b 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -276,7 +276,7 @@ extern unsigned long *empty_zero_page; #define pte_none(x) (pte_val(x) == 0) #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) #define pte_user(x) (pte_val(x) & _PAGE_USER) -#define pte_clear(mm, addr, xp) set_pte(xp, __pte(0)) +#define pte_clear(mm, addr, xp) set_pte_at((mm), (addr), (xp), __pte(0))
#define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK) #define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT) @@ -398,6 +398,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr, } } #define set_ptes set_ptes +#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
/* Used for deferring calls to flush_dcache_page() */
@@ -462,7 +463,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned if (!pte_young(pte)) { return 0; } - set_pte(ptep, pte_mkold(pte)); + set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte)); return 1; }
@@ -472,7 +473,7 @@ pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *pt struct mm_struct; static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - set_pte(ptep, pte_wrprotect(*ptep)); + set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep)); }
#define pte_same(A,B) (pte_val(A) == pte_val(B)) diff --git a/arch/parisc/include/asm/special_insns.h b/arch/parisc/include/asm/special_insns.h index 51f40eaf7780..1013eeba31e5 100644 --- a/arch/parisc/include/asm/special_insns.h +++ b/arch/parisc/include/asm/special_insns.h @@ -32,6 +32,34 @@ pa; \ })
+/** + * prober_user() - Probe user read access + * @sr: Space regster. + * @va: Virtual address. + * + * Return: Non-zero if address is accessible. + * + * Due to the way _PAGE_READ is handled in TLB entries, we need + * a special check to determine whether a user address is accessible. + * The ldb instruction does the initial access check. If it is + * successful, the probe instruction checks user access rights. + */ +#define prober_user(sr, va) ({ \ + unsigned long read_allowed; \ + __asm__ __volatile__( \ + "copy %%r0,%0\n" \ + "8:\tldb 0(%%sr%1,%2),%%r0\n" \ + "\tproberi (%%sr%1,%2),%3,%0\n" \ + "9:\n" \ + ASM_EXCEPTIONTABLE_ENTRY(8b, 9b, \ + "or %%r0,%%r0,%%r0") \ + : "=&r" (read_allowed) \ + : "i" (sr), "r" (va), "i" (PRIV_USER) \ + : "memory" \ + ); \ + read_allowed; \ +}) + #define CR_EIEM 15 /* External Interrupt Enable Mask */ #define CR_CR16 16 /* CR16 Interval Timer */ #define CR_EIRR 23 /* External Interrupt Request Register */ diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index 88d0ae5769dd..6c531d2c847e 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -42,9 +42,24 @@ __gu_err; \ })
-#define __get_user(val, ptr) \ -({ \ - __get_user_internal(SR_USER, val, ptr); \ +#define __probe_user_internal(sr, error, ptr) \ +({ \ + __asm__("\tproberi (%%sr%1,%2),%3,%0\n" \ + "\tcmpiclr,= 1,%0,%0\n" \ + "\tldi %4,%0\n" \ + : "=r"(error) \ + : "i"(sr), "r"(ptr), "i"(PRIV_USER), \ + "i"(-EFAULT)); \ +}) + +#define __get_user(val, ptr) \ +({ \ + register long __gu_err; \ + \ + __gu_err = __get_user_internal(SR_USER, val, ptr); \ + if (likely(!__gu_err)) \ + __probe_user_internal(SR_USER, __gu_err, ptr); \ + __gu_err; \ })
#define __get_user_asm(sr, val, ldx, ptr) \ diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index f7953b0391cf..1898956a70f2 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -425,7 +425,7 @@ static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr) return ptep; }
-static inline bool pte_needs_flush(pte_t pte) +static inline bool pte_needs_cache_flush(pte_t pte) { return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE)) == (_PAGE_PRESENT | _PAGE_ACCESSED); @@ -630,7 +630,7 @@ static void flush_cache_page_if_present(struct vm_area_struct *vma, ptep = get_ptep(vma->vm_mm, vmaddr); if (ptep) { pte = ptep_get(ptep); - needs_flush = pte_needs_flush(pte); + needs_flush = pte_needs_cache_flush(pte); pte_unmap(ptep); } if (needs_flush) @@ -841,7 +841,7 @@ void flush_cache_vmap(unsigned long start, unsigned long end) }
vm = find_vm_area((void *)start); - if (WARN_ON_ONCE(!vm)) { + if (!vm) { flush_cache_all(); return; } diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index ea57bcc21dc5..f4bf61a34701 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -499,6 +499,12 @@ * this happens is quite subtle, read below */ .macro make_insert_tlb spc,pte,prot,tmp space_to_prot \spc \prot /* create prot id from space */ + +#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT + /* need to drop DMB bit, as it's used as SPECIAL flag */ + depi 0,_PAGE_SPECIAL_BIT,1,\pte +#endif + /* The following is the real subtlety. This is depositing * T <-> _PAGE_REFTRAP * D <-> _PAGE_DIRTY @@ -511,17 +517,18 @@ * Finally, _PAGE_READ goes in the top bit of PL1 (so we * trigger an access rights trap in user space if the user * tries to read an unreadable page */ -#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT - /* need to drop DMB bit, as it's used as SPECIAL flag */ - depi 0,_PAGE_SPECIAL_BIT,1,\pte -#endif depd \pte,8,7,\prot
/* PAGE_USER indicates the page can be read with user privileges, * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1 - * contains _PAGE_READ) */ + * contains _PAGE_READ). While the kernel can't directly write + * user pages which have _PAGE_WRITE zero, it can read pages + * which have _PAGE_READ zero (PL <= PL1). Thus, the kernel + * exception fault handler doesn't trigger when reading pages + * that aren't user read accessible */ extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0 depdi 7,11,3,\prot + /* If we're a gateway page, drop PL2 back to zero for promotion * to kernel privilege (so we can execute the page as kernel). * Any privilege promotion page always denys read and write */ diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 0fa81bf1466b..f58c4bccfbce 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -613,6 +613,9 @@ lws_compare_and_swap32: lws_compare_and_swap: /* Trigger memory reference interruptions without writing to memory */ 1: ldw 0(%r26), %r28 + proberi (%r26), PRIV_USER, %r28 + comb,=,n %r28, %r0, lws_fault /* backwards, likely not taken */ + nop 2: stbys,e %r0, 0(%r26)
/* Calculate 8-bit hash index from virtual address */ @@ -767,6 +770,9 @@ cas2_lock_start: copy %r26, %r28 depi_safe 0, 31, 2, %r28 10: ldw 0(%r28), %r1 + proberi (%r28), PRIV_USER, %r1 + comb,=,n %r1, %r0, lws_fault /* backwards, likely not taken */ + nop 11: stbys,e %r0, 0(%r28)
/* Calculate 8-bit hash index from virtual address */ @@ -951,41 +957,47 @@ atomic_xchg_begin:
/* 8-bit exchange */ 1: ldb 0(%r24), %r20 + proberi (%r24), PRIV_USER, %r20 + comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */ + nop copy %r23, %r20 depi_safe 0, 31, 2, %r20 b atomic_xchg_start 2: stbys,e %r0, 0(%r20) - nop - nop - nop
/* 16-bit exchange */ 3: ldh 0(%r24), %r20 + proberi (%r24), PRIV_USER, %r20 + comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */ + nop copy %r23, %r20 depi_safe 0, 31, 2, %r20 b atomic_xchg_start 4: stbys,e %r0, 0(%r20) - nop - nop - nop
/* 32-bit exchange */ 5: ldw 0(%r24), %r20 + proberi (%r24), PRIV_USER, %r20 + comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */ + nop b atomic_xchg_start 6: stbys,e %r0, 0(%r23) nop nop - nop - nop - nop
/* 64-bit exchange */ #ifdef CONFIG_64BIT 7: ldd 0(%r24), %r20 + proberi (%r24), PRIV_USER, %r20 + comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */ + nop 8: stdby,e %r0, 0(%r23) #else 7: ldw 0(%r24), %r20 8: ldw 4(%r24), %r20 + proberi (%r24), PRIV_USER, %r20 + comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */ + nop copy %r23, %r20 depi_safe 0, 31, 2, %r20 9: stbys,e %r0, 0(%r20) diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c index 5fc0c852c84c..69d65ffab312 100644 --- a/arch/parisc/lib/memcpy.c +++ b/arch/parisc/lib/memcpy.c @@ -12,6 +12,7 @@ #include <linux/module.h> #include <linux/compiler.h> #include <linux/uaccess.h> +#include <linux/mm.h>
#define get_user_space() mfsp(SR_USER) #define get_kernel_space() SR_KERNEL @@ -32,9 +33,25 @@ EXPORT_SYMBOL(raw_copy_to_user); unsigned long raw_copy_from_user(void *dst, const void __user *src, unsigned long len) { + unsigned long start = (unsigned long) src; + unsigned long end = start + len; + unsigned long newlen = len; + mtsp(get_user_space(), SR_TEMP1); mtsp(get_kernel_space(), SR_TEMP2); - return pa_memcpy(dst, (void __force *)src, len); + + /* Check region is user accessible */ + if (start) + while (start < end) { + if (!prober_user(SR_TEMP1, start)) { + newlen = (start - (unsigned long) src); + break; + } + start += PAGE_SIZE; + /* align to page boundry which may have different permission */ + start = PAGE_ALIGN_DOWN(start); + } + return len - newlen + pa_memcpy(dst, (void __force *)src, newlen); } EXPORT_SYMBOL(raw_copy_from_user);
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index c39de84e98b0..f1785640b049 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -363,6 +363,10 @@ void do_page_fault(struct pt_regs *regs, unsigned long code, mmap_read_unlock(mm);
bad_area_nosemaphore: + if (!user_mode(regs) && fixup_exception(regs)) { + return; + } + if (user_mode(regs)) { int signo, si_code;
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index 968aee2025b8..99c39c9b2a71 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -72,6 +72,7 @@ BOOTCPPFLAGS := -nostdinc $(LINUXINCLUDE) BOOTCPPFLAGS += -isystem $(shell $(BOOTCC) -print-file-name=include)
BOOTCFLAGS := $(BOOTTARGETFLAGS) \ + -std=gnu11 \ -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ -fno-strict-aliasing -O2 \ -msoft-float -mno-altivec -mno-vsx \ diff --git a/arch/powerpc/include/asm/floppy.h b/arch/powerpc/include/asm/floppy.h index f8ce178b43b7..34abf8bea2cc 100644 --- a/arch/powerpc/include/asm/floppy.h +++ b/arch/powerpc/include/asm/floppy.h @@ -144,9 +144,12 @@ static int hard_dma_setup(char *addr, unsigned long size, int mode, int io) bus_addr = 0; }
- if (!bus_addr) /* need to map it */ + if (!bus_addr) { /* need to map it */ bus_addr = dma_map_single(&isa_bridge_pcidev->dev, addr, size, dir); + if (dma_mapping_error(&isa_bridge_pcidev->dev, bus_addr)) + return -ENOMEM; + }
/* remember this one as prev */ prev_addr = addr; diff --git a/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c b/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c index 4a25b6b48615..f1e353fc6594 100644 --- a/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c +++ b/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c @@ -240,10 +240,8 @@ static int mpc512x_lpbfifo_kick(void) dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
/* Make DMA channel work with LPB FIFO data register */ - if (dma_dev->device_config(lpbfifo.chan, &dma_conf)) { - ret = -EINVAL; - goto err_dma_prep; - } + if (dma_dev->device_config(lpbfifo.chan, &dma_conf)) + return -EINVAL;
sg_init_table(&sg, 1);
diff --git a/arch/riscv/include/asm/topology.h b/arch/riscv/include/asm/topology.h index e316ab3b77f3..61183688bdd5 100644 --- a/arch/riscv/include/asm/topology.h +++ b/arch/riscv/include/asm/topology.h @@ -9,6 +9,7 @@ #define arch_set_freq_scale topology_set_freq_scale #define arch_scale_freq_capacity topology_get_freq_scale #define arch_scale_freq_invariant topology_scale_freq_invariant +#define arch_scale_freq_ref topology_get_freq_ref
/* Replace task scheduler's default cpu-invariant accounting */ #define arch_scale_cpu_capacity topology_get_cpu_scale diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c index 4024599eb448..3612af9b4890 100644 --- a/arch/s390/hypfs/hypfs_dbfs.c +++ b/arch/s390/hypfs/hypfs_dbfs.c @@ -6,6 +6,7 @@ * Author(s): Michael Holzheu holzheu@linux.vnet.ibm.com */
+#include <linux/security.h> #include <linux/slab.h> #include "hypfs.h"
@@ -64,24 +65,28 @@ static long dbfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg) long rc;
mutex_lock(&df->lock); - if (df->unlocked_ioctl) - rc = df->unlocked_ioctl(file, cmd, arg); - else - rc = -ENOTTY; + rc = df->unlocked_ioctl(file, cmd, arg); mutex_unlock(&df->lock); return rc; }
-static const struct file_operations dbfs_ops = { +static const struct file_operations dbfs_ops_ioctl = { .read = dbfs_read, .llseek = no_llseek, .unlocked_ioctl = dbfs_ioctl, };
+static const struct file_operations dbfs_ops = { + .read = dbfs_read, +}; + void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df) { - df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df, - &dbfs_ops); + const struct file_operations *fops = &dbfs_ops; + + if (df->unlocked_ioctl && !security_locked_down(LOCKDOWN_DEBUGFS)) + fops = &dbfs_ops_ioctl; + df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df, fops); mutex_init(&df->lock); }
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h index 4d646659a5f5..f7a44af12c2f 100644 --- a/arch/s390/include/asm/timex.h +++ b/arch/s390/include/asm/timex.h @@ -192,13 +192,6 @@ static inline unsigned long get_tod_clock_fast(void) asm volatile("stckf %0" : "=Q" (clk) : : "cc"); return clk; } - -static inline cycles_t get_cycles(void) -{ - return (cycles_t) get_tod_clock() >> 2; -} -#define get_cycles get_cycles - int get_phys_clock(unsigned long *clock); void init_cpu_timer(void);
@@ -226,6 +219,12 @@ static inline unsigned long get_tod_clock_monotonic(void) return tod; }
+static inline cycles_t get_cycles(void) +{ + return (cycles_t)get_tod_clock_monotonic() >> 2; +} +#define get_cycles get_cycles + /** * tod_to_ns - convert a TOD format value to nanoseconds * @todval: to be converted TOD format value diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index d34d3548c046..086d3e3ffdea 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -579,7 +579,7 @@ static int stp_sync_clock(void *data) atomic_dec(&sync->cpus); /* Wait for in_sync to be set. */ while (READ_ONCE(sync->in_sync) == 0) - __udelay(1); + ; } if (sync->in_sync != 1) /* Didn't work. Clear per-cpu in sync bit again. */ diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c index b51666967aa1..4721ada81a02 100644 --- a/arch/s390/mm/dump_pagetables.c +++ b/arch/s390/mm/dump_pagetables.c @@ -249,11 +249,9 @@ static int ptdump_show(struct seq_file *m, void *v) .marker = address_markers, };
- get_online_mems(); mutex_lock(&cpa_mutex); ptdump_walk_pgd(&st.ptdump, &init_mm, NULL); mutex_unlock(&cpa_mutex); - put_online_mems(); return 0; } DEFINE_SHOW_ATTRIBUTE(ptdump); diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index 9355fbe5f51e..2f534b26fda6 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -456,11 +456,6 @@ void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable) page = virt_to_page(pgtable); SetPageActive(page); page_table_free(mm, (unsigned long *)pgtable); - /* - * page_table_free() does not do the pgste gmap_unlink() which - * page_table_free_rcu() does: warn us if pgste ever reaches here. - */ - WARN_ON_ONCE(mm_has_pgste(mm)); } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h index c7b4b49826a2..40d823f36c09 100644 --- a/arch/um/include/asm/thread_info.h +++ b/arch/um/include/asm/thread_info.h @@ -68,7 +68,11 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_MEMDIE (1 << TIF_MEMDIE) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) +#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) #define _TIF_SECCOMP (1 << TIF_SECCOMP) #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
+#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL | \ + _TIF_NOTIFY_RESUME) + #endif diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index afe67d816146..e7fbf610bda8 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c @@ -98,14 +98,18 @@ void *__switch_to(struct task_struct *from, struct task_struct *to) void interrupt_end(void) { struct pt_regs *regs = ¤t->thread.regs; - - if (need_resched()) - schedule(); - if (test_thread_flag(TIF_SIGPENDING) || - test_thread_flag(TIF_NOTIFY_SIGNAL)) - do_signal(regs); - if (test_thread_flag(TIF_NOTIFY_RESUME)) - resume_user_mode_work(regs); + unsigned long thread_flags; + + thread_flags = read_thread_flags(); + while (thread_flags & _TIF_WORK_MASK) { + if (thread_flags & _TIF_NEED_RESCHED) + schedule(); + if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) + do_signal(regs); + if (thread_flags & _TIF_NOTIFY_RESUME) + resume_user_mode_work(regs); + thread_flags = read_thread_flags(); + } }
int get_current_pid(void) diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index e59ded976166..a0a4fc684e63 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -48,7 +48,6 @@ KVM_X86_OP(set_idt) KVM_X86_OP(get_gdt) KVM_X86_OP(set_gdt) KVM_X86_OP(sync_dirty_debug_regs) -KVM_X86_OP(set_dr6) KVM_X86_OP(set_dr7) KVM_X86_OP(cache_reg) KVM_X86_OP(get_rflags) @@ -102,7 +101,6 @@ KVM_X86_OP(write_tsc_multiplier) KVM_X86_OP(get_exit_info) KVM_X86_OP(check_intercept) KVM_X86_OP(handle_exit_irqoff) -KVM_X86_OP(request_immediate_exit) KVM_X86_OP(sched_in) KVM_X86_OP_OPTIONAL(update_cpu_dirty_logging) KVM_X86_OP_OPTIONAL(vcpu_blocking) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 5dfb8cc9616e..813887324d52 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -733,6 +733,7 @@ struct kvm_vcpu_arch { u32 pkru; u32 hflags; u64 efer; + u64 host_debugctl; u64 apic_base; struct kvm_lapic *apic; /* kernel irqchip context */ bool load_eoi_exitmap_pending; @@ -1549,6 +1550,12 @@ static inline u16 kvm_lapic_irq_dest_mode(bool dest_mode_logical) return dest_mode_logical ? APIC_DEST_LOGICAL : APIC_DEST_PHYSICAL; }
+enum kvm_x86_run_flags { + KVM_RUN_FORCE_IMMEDIATE_EXIT = BIT(0), + KVM_RUN_LOAD_GUEST_DR6 = BIT(1), + KVM_RUN_LOAD_DEBUGCTL = BIT(2), +}; + struct kvm_x86_ops { const char *name;
@@ -1574,6 +1581,12 @@ struct kvm_x86_ops { void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu); void (*vcpu_put)(struct kvm_vcpu *vcpu);
+ /* + * Mask of DEBUGCTL bits that are owned by the host, i.e. that need to + * match the host's value even while the guest is active. + */ + const u64 HOST_OWNED_DEBUGCTL; + void (*update_exception_bitmap)(struct kvm_vcpu *vcpu); int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr); int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr); @@ -1595,7 +1608,6 @@ struct kvm_x86_ops { void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt); void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt); void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu); - void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value); void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value); void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg); unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); @@ -1623,7 +1635,8 @@ struct kvm_x86_ops { void (*flush_tlb_guest)(struct kvm_vcpu *vcpu);
int (*vcpu_pre_run)(struct kvm_vcpu *vcpu); - enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *vcpu); + enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *vcpu, + u64 run_flags); int (*handle_exit)(struct kvm_vcpu *vcpu, enum exit_fastpath_completion exit_fastpath); int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu); @@ -1657,7 +1670,7 @@ struct kvm_x86_ops { bool allow_apicv_in_x2apic_without_x2apic_virtualization; void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu); void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr); - void (*hwapic_isr_update)(int isr); + void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr); bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu); void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap); void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu); @@ -1693,8 +1706,6 @@ struct kvm_x86_ops { struct x86_exception *exception); void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);
- void (*request_immediate_exit)(struct kvm_vcpu *vcpu); - void (*sched_in)(struct kvm_vcpu *kvm, int cpu);
/* @@ -2180,7 +2191,6 @@ extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu); int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err); -void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu);
void __user *__x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size); diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 033855457581..723e48b57bd0 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -380,6 +380,7 @@ #define DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI (1UL << 12) #define DEBUGCTLMSR_FREEZE_IN_SMM_BIT 14 #define DEBUGCTLMSR_FREEZE_IN_SMM (1UL << DEBUGCTLMSR_FREEZE_IN_SMM_BIT) +#define DEBUGCTLMSR_RTM_DEBUG BIT(15)
#define MSR_PEBS_FRONTEND 0x000003f7
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index 97771b9d33af..2759524b8ffc 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h @@ -94,12 +94,13 @@ DECLARE_STATIC_CALL(xen_hypercall, xen_hypercall_func); #ifdef MODULE #define __ADDRESSABLE_xen_hypercall #else -#define __ADDRESSABLE_xen_hypercall __ADDRESSABLE_ASM_STR(__SCK__xen_hypercall) +#define __ADDRESSABLE_xen_hypercall \ + __stringify(.global STATIC_CALL_KEY(xen_hypercall);) #endif
#define __HYPERCALL \ __ADDRESSABLE_xen_hypercall \ - "call __SCT__xen_hypercall" + __stringify(call STATIC_CALL_TRAMP(xen_hypercall))
#define __HYPERCALL_ENTRY(x) "a" (x)
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index c4d5ac99c6af..332c6f24280d 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -71,10 +71,9 @@ void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
static void __init set_return_thunk(void *thunk) { - if (x86_return_thunk != __x86_return_thunk) - pr_warn("x86/bugs: return thunk changed\n"); - x86_return_thunk = thunk; + + pr_info("active return thunk: %ps\n", thunk); }
/* Update SPEC_CTRL MSR and its cached copy unconditionally */ diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c index 6e738759779e..5c594781b463 100644 --- a/arch/x86/kernel/cpu/hygon.c +++ b/arch/x86/kernel/cpu/hygon.c @@ -15,6 +15,7 @@ #include <asm/cacheinfo.h> #include <asm/spec-ctrl.h> #include <asm/delay.h> +#include <asm/resctrl.h>
#include "cpu.h"
@@ -240,6 +241,8 @@ static void bsp_init_hygon(struct cpuinfo_x86 *c) x86_amd_ls_cfg_ssbd_mask = 1ULL << 10; } } + + resctrl_cpu_detect(c); }
static void early_init_hygon(struct cpuinfo_x86 *c) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index bd3fbd5be5da..223f4fa6a849 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1929,6 +1929,9 @@ int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu) if (entries[i] == KVM_HV_TLB_FLUSHALL_ENTRY) goto out_flush_all;
+ if (is_noncanonical_address(entries[i], vcpu)) + continue; + /* * Lower 12 bits of 'address' encode the number of additional * pages to flush. diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 66c7f2367bb3..ba1c2a7f74f7 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -750,7 +750,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic) * just set SVI. */ if (unlikely(apic->apicv_active)) - static_call_cond(kvm_x86_hwapic_isr_update)(vec); + static_call_cond(kvm_x86_hwapic_isr_update)(apic->vcpu, vec); else { ++apic->isr_count; BUG_ON(apic->isr_count > MAX_APIC_VECTOR); @@ -795,7 +795,7 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic) * and must be left alone. */ if (unlikely(apic->apicv_active)) - static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic)); + static_call_cond(kvm_x86_hwapic_isr_update)(apic->vcpu, apic_find_highest_isr(apic)); else { --apic->isr_count; BUG_ON(apic->isr_count < 0); @@ -803,6 +803,17 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic) } }
+void kvm_apic_update_hwapic_isr(struct kvm_vcpu *vcpu) +{ + struct kvm_lapic *apic = vcpu->arch.apic; + + if (WARN_ON_ONCE(!lapic_in_kernel(vcpu)) || !apic->apicv_active) + return; + + static_call(kvm_x86_hwapic_isr_update)(vcpu, apic_find_highest_isr(apic)); +} +EXPORT_SYMBOL_GPL(kvm_apic_update_hwapic_isr); + int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu) { /* This may race with setting of irr in __apic_accept_irq() and @@ -2772,7 +2783,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event) if (apic->apicv_active) { static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu); static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, -1); - static_call_cond(kvm_x86_hwapic_isr_update)(-1); + static_call_cond(kvm_x86_hwapic_isr_update)(vcpu, -1); }
vcpu->arch.apic_arb_prio = 0; @@ -3072,7 +3083,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) if (apic->apicv_active) { static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu); static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic)); - static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic)); + static_call_cond(kvm_x86_hwapic_isr_update)(vcpu, apic_find_highest_isr(apic)); } kvm_make_request(KVM_REQ_EVENT, vcpu); if (ioapic_in_kernel(vcpu->kvm)) diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index 0a0ea4b5dd8c..0dd069b8d6d1 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -124,6 +124,7 @@ int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info); int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s); int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s); enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu); +void kvm_apic_update_hwapic_isr(struct kvm_vcpu *vcpu); int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 86c50747e158..abff6d45ae33 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4157,6 +4157,9 @@ static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu)
static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu) { + if (is_guest_mode(vcpu)) + return EXIT_FASTPATH_NONE; + if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR && to_svm(vcpu)->vmcb->control.exit_info_1) return handle_fastpath_set_msr_irqoff(vcpu); @@ -4170,6 +4173,18 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
guest_state_enter_irqoff();
+ /* + * Set RFLAGS.IF prior to VMRUN, as the host's RFLAGS.IF at the time of + * VMRUN controls whether or not physical IRQs are masked (KVM always + * runs with V_INTR_MASKING_MASK). Toggle RFLAGS.IF here to avoid the + * temptation to do STI+VMRUN+CLI, as AMD CPUs bleed the STI shadow + * into guest state if delivery of an event during VMRUN triggers a + * #VMEXIT, and the guest_state transitions already tell lockdep that + * IRQs are being enabled/disabled. Note! GIF=0 for the entirety of + * this path, so IRQs aren't actually unmasked while running host code. + */ + raw_local_irq_enable(); + amd_clear_divider();
if (sev_es_guest(vcpu->kvm)) @@ -4177,15 +4192,18 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in else __svm_vcpu_run(svm, spec_ctrl_intercepted);
+ raw_local_irq_disable(); + guest_state_exit_irqoff(); }
-static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) +static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags) { + bool force_immediate_exit = run_flags & KVM_RUN_FORCE_IMMEDIATE_EXIT; struct vcpu_svm *svm = to_svm(vcpu); bool spec_ctrl_intercepted = msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL);
- trace_kvm_entry(vcpu); + trace_kvm_entry(vcpu, force_immediate_exit);
svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; @@ -4204,9 +4222,12 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) * is enough to force an immediate vmexit. */ disable_nmi_singlestep(svm); - smp_send_reschedule(vcpu->cpu); + force_immediate_exit = true; }
+ if (force_immediate_exit) + smp_send_reschedule(vcpu->cpu); + pre_svm_run(vcpu);
sync_lapic_to_cr8(vcpu); @@ -4220,10 +4241,13 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) svm_hv_update_vp_id(svm->vmcb, vcpu);
/* - * Run with all-zero DR6 unless needed, so that we can get the exact cause - * of a #DB. + * Run with all-zero DR6 unless the guest can write DR6 freely, so that + * KVM can get the exact cause of a #DB. Note, loading guest DR6 from + * KVM's snapshot is only necessary when DR accesses won't exit. */ - if (likely(!(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))) + if (unlikely(run_flags & KVM_RUN_LOAD_GUEST_DR6)) + svm_set_dr6(vcpu, vcpu->arch.dr6); + else if (likely(!(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))) svm_set_dr6(vcpu, DR6_ACTIVE_LOW);
clgi(); @@ -4300,9 +4324,6 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
svm_complete_interrupts(vcpu);
- if (is_guest_mode(vcpu)) - return EXIT_FASTPATH_NONE; - return svm_exit_handlers_fastpath(vcpu); }
@@ -5003,7 +5024,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .set_idt = svm_set_idt, .get_gdt = svm_get_gdt, .set_gdt = svm_set_gdt, - .set_dr6 = svm_set_dr6, .set_dr7 = svm_set_dr7, .sync_dirty_debug_regs = svm_sync_dirty_debug_regs, .cache_reg = svm_cache_reg, @@ -5060,8 +5080,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .check_intercept = svm_check_intercept, .handle_exit_irqoff = svm_handle_exit_irqoff,
- .request_immediate_exit = __kvm_request_immediate_exit, - .sched_in = svm_sched_in,
.nested_ops = &svm_nested_ops, diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S index 56fe34d9397f..81ecb9e1101d 100644 --- a/arch/x86/kvm/svm/vmenter.S +++ b/arch/x86/kvm/svm/vmenter.S @@ -171,12 +171,8 @@ SYM_FUNC_START(__svm_vcpu_run) VM_CLEAR_CPU_BUFFERS
/* Enter guest mode */ - sti - 3: vmrun %_ASM_AX 4: - cli - /* Pop @svm to RAX while it's the only available register. */ pop %_ASM_AX
@@ -341,11 +337,8 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run) VM_CLEAR_CPU_BUFFERS
/* Enter guest mode */ - sti - 1: vmrun %_ASM_AX - -2: cli +2:
/* Pop @svm to RDI, guest registers have been saved already. */ pop %_ASM_DI diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index b82e6ed4f024..c6b4b1728006 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -15,20 +15,23 @@ * Tracepoint for guest mode entry. */ TRACE_EVENT(kvm_entry, - TP_PROTO(struct kvm_vcpu *vcpu), - TP_ARGS(vcpu), + TP_PROTO(struct kvm_vcpu *vcpu, bool force_immediate_exit), + TP_ARGS(vcpu, force_immediate_exit),
TP_STRUCT__entry( __field( unsigned int, vcpu_id ) __field( unsigned long, rip ) + __field( bool, immediate_exit ) ),
TP_fast_assign( __entry->vcpu_id = vcpu->vcpu_id; __entry->rip = kvm_rip_read(vcpu); + __entry->immediate_exit = force_immediate_exit; ),
- TP_printk("vcpu %u, rip 0x%lx", __entry->vcpu_id, __entry->rip) + TP_printk("vcpu %u, rip 0x%lx%s", __entry->vcpu_id, __entry->rip, + __entry->immediate_exit ? "[immediate exit]" : "") );
/* diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index d3e346a574f1..d2fa192d7ce7 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -2564,10 +2564,11 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, if (vmx->nested.nested_run_pending && (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) { kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); - vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl); + vmx_guest_debugctl_write(vcpu, vmcs12->guest_ia32_debugctl & + vmx_get_supported_debugctl(vcpu, false)); } else { kvm_set_dr(vcpu, 7, vcpu->arch.dr7); - vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.pre_vmenter_debugctl); + vmx_guest_debugctl_write(vcpu, vmx->nested.pre_vmenter_debugctl); } if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending || !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))) @@ -3045,7 +3046,8 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu, return -EINVAL;
if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) && - CC(!kvm_dr7_valid(vmcs12->guest_dr7))) + (CC(!kvm_dr7_valid(vmcs12->guest_dr7)) || + CC(!vmx_is_valid_debugctl(vcpu, vmcs12->guest_ia32_debugctl, false)))) return -EINVAL;
if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) && @@ -3431,7 +3433,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
if (!vmx->nested.nested_run_pending || !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) - vmx->nested.pre_vmenter_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); + vmx->nested.pre_vmenter_debugctl = vmx_guest_debugctl_read(); if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending || !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))) @@ -4435,6 +4437,12 @@ static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) | (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
+ /* + * Note! Save DR7, but intentionally don't grab DEBUGCTL from vmcs02. + * Writes to DEBUGCTL that aren't intercepted by L1 are immediately + * propagated to vmcs12 (see vmx_set_msr()), as the value loaded into + * vmcs02 doesn't strictly track vmcs12. + */ if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
@@ -4625,7 +4633,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, __vmx_set_segment(vcpu, &seg, VCPU_SREG_LDTR);
kvm_set_dr(vcpu, 7, 0x400); - vmcs_write64(GUEST_IA32_DEBUGCTL, 0); + vmx_guest_debugctl_write(vcpu, 0);
if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr, vmcs12->vm_exit_msr_load_count)) @@ -4680,6 +4688,9 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu) WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7))); }
+ /* Reload DEBUGCTL to ensure vmcs01 has a fresh FREEZE_IN_SMM value. */ + vmx_reload_guest_debugctl(vcpu); + /* * Note that calling vmx_set_{efer,cr0,cr4} is important as they * handle a variety of side effects to KVM's software model. @@ -4900,6 +4911,11 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason, kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu); }
+ if (vmx->nested.update_vmcs01_hwapic_isr) { + vmx->nested.update_vmcs01_hwapic_isr = false; + kvm_apic_update_hwapic_isr(vcpu); + } + if ((vm_exit_reason != -1) && (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))) vmx->nested.need_vmcs12_to_shadow_sync = true; diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index 48a2f77f62ef..50364e00e4e9 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -633,11 +633,11 @@ static void intel_pmu_reset(struct kvm_vcpu *vcpu) */ static void intel_pmu_legacy_freezing_lbrs_on_pmi(struct kvm_vcpu *vcpu) { - u64 data = vmcs_read64(GUEST_IA32_DEBUGCTL); + u64 data = vmx_guest_debugctl_read();
if (data & DEBUGCTLMSR_FREEZE_LBRS_ON_PMI) { data &= ~DEBUGCTLMSR_LBR; - vmcs_write64(GUEST_IA32_DEBUGCTL, data); + vmx_guest_debugctl_write(vcpu, data); } }
@@ -707,7 +707,7 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
if (!lbr_desc->event) { vmx_disable_lbr_msrs_passthrough(vcpu); - if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR) + if (vmx_guest_debugctl_read() & DEBUGCTLMSR_LBR) goto warn; if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use)) goto warn; @@ -729,7 +729,7 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
static void intel_pmu_cleanup(struct kvm_vcpu *vcpu) { - if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)) + if (!(vmx_guest_debugctl_read() & DEBUGCTLMSR_LBR)) intel_pmu_release_guest_lbr_event(vcpu); }
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index e53620e18925..9b1f22bcb716 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -49,6 +49,8 @@ #include <asm/spec-ctrl.h> #include <asm/vmx.h>
+#include <trace/events/ipi.h> + #include "capabilities.h" #include "cpuid.h" #include "hyperv.h" @@ -1304,8 +1306,6 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) u16 fs_sel, gs_sel; int i;
- vmx->req_immediate_exit = false; - /* * Note that guest MSRs to be saved/restored can also be changed * when guest state is loaded. This happens when guest transitions @@ -1499,13 +1499,9 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, */ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { - struct vcpu_vmx *vmx = to_vmx(vcpu); - vmx_vcpu_load_vmcs(vcpu, cpu, NULL);
vmx_vcpu_pi_load(vcpu, cpu); - - vmx->host_debugctlmsr = get_debugctlmsr(); }
static void vmx_vcpu_put(struct kvm_vcpu *vcpu) @@ -2128,7 +2124,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = vmx->pt_desc.guest.addr_a[index / 2]; break; case MSR_IA32_DEBUGCTLMSR: - msr_info->data = vmcs_read64(GUEST_IA32_DEBUGCTL); + msr_info->data = vmx_guest_debugctl_read(); break; default: find_uret_msr: @@ -2153,7 +2149,7 @@ static u64 nested_vmx_truncate_sysenter_addr(struct kvm_vcpu *vcpu, return (unsigned long)data; }
-static u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated) +u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated) { u64 debugctl = 0;
@@ -2165,9 +2161,25 @@ static u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated (host_initiated || intel_pmu_lbr_is_enabled(vcpu))) debugctl |= DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
+ if (boot_cpu_has(X86_FEATURE_RTM) && + (host_initiated || guest_cpuid_has(vcpu, X86_FEATURE_RTM))) + debugctl |= DEBUGCTLMSR_RTM_DEBUG; + return debugctl; }
+bool vmx_is_valid_debugctl(struct kvm_vcpu *vcpu, u64 data, bool host_initiated) +{ + u64 invalid; + + invalid = data & ~vmx_get_supported_debugctl(vcpu, host_initiated); + if (invalid & (DEBUGCTLMSR_BTF | DEBUGCTLMSR_LBR)) { + kvm_pr_unimpl_wrmsr(vcpu, MSR_IA32_DEBUGCTLMSR, data); + invalid &= ~(DEBUGCTLMSR_BTF | DEBUGCTLMSR_LBR); + } + return !invalid; +} + /* * Writes msr value into the appropriate "register". * Returns 0 on success, non-0 otherwise. @@ -2236,29 +2248,22 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) } vmcs_writel(GUEST_SYSENTER_ESP, data); break; - case MSR_IA32_DEBUGCTLMSR: { - u64 invalid; - - invalid = data & ~vmx_get_supported_debugctl(vcpu, msr_info->host_initiated); - if (invalid & (DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR)) { - kvm_pr_unimpl_wrmsr(vcpu, msr_index, data); - data &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR); - invalid &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR); - } - - if (invalid) + case MSR_IA32_DEBUGCTLMSR: + if (!vmx_is_valid_debugctl(vcpu, data, msr_info->host_initiated)) return 1;
+ data &= vmx_get_supported_debugctl(vcpu, msr_info->host_initiated); + if (is_guest_mode(vcpu) && get_vmcs12(vcpu)->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) get_vmcs12(vcpu)->guest_ia32_debugctl = data;
- vmcs_write64(GUEST_IA32_DEBUGCTL, data); + vmx_guest_debugctl_write(vcpu, data); + if (intel_pmu_lbr_is_enabled(vcpu) && !to_vmx(vcpu)->lbr_desc.event && (data & DEBUGCTLMSR_LBR)) intel_pmu_create_guest_lbr_event(vcpu); return 0; - } case MSR_IA32_BNDCFGS: if (!kvm_mpx_supported() || (!msr_info->host_initiated && @@ -4822,7 +4827,8 @@ static void init_vmcs(struct vcpu_vmx *vmx) vmcs_write32(GUEST_SYSENTER_CS, 0); vmcs_writel(GUEST_SYSENTER_ESP, 0); vmcs_writel(GUEST_SYSENTER_EIP, 0); - vmcs_write64(GUEST_IA32_DEBUGCTL, 0); + + vmx_guest_debugctl_write(&vmx->vcpu, 0);
if (cpu_has_vmx_tpr_shadow()) { vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0); @@ -5620,12 +5626,6 @@ static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) set_debugreg(DR6_RESERVED, 6); }
-static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val) -{ - lockdep_assert_irqs_disabled(); - set_debugreg(vcpu->arch.dr6, 6); -} - static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val) { vmcs_writel(GUEST_DR7, val); @@ -6019,22 +6019,46 @@ static int handle_pml_full(struct kvm_vcpu *vcpu) return 1; }
-static fastpath_t handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu) +static fastpath_t handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu, + bool force_immediate_exit) { struct vcpu_vmx *vmx = to_vmx(vcpu);
- if (!vmx->req_immediate_exit && - !unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled)) { - kvm_lapic_expired_hv_timer(vcpu); + /* + * In the *extremely* unlikely scenario that this is a spurious VM-Exit + * due to the timer expiring while it was "soft" disabled, just eat the + * exit and re-enter the guest. + */ + if (unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled)) return EXIT_FASTPATH_REENTER_GUEST; - }
- return EXIT_FASTPATH_NONE; + /* + * If the timer expired because KVM used it to force an immediate exit, + * then mission accomplished. + */ + if (force_immediate_exit) + return EXIT_FASTPATH_EXIT_HANDLED; + + /* + * If L2 is active, go down the slow path as emulating the guest timer + * expiration likely requires synthesizing a nested VM-Exit. + */ + if (is_guest_mode(vcpu)) + return EXIT_FASTPATH_NONE; + + kvm_lapic_expired_hv_timer(vcpu); + return EXIT_FASTPATH_REENTER_GUEST; }
static int handle_preemption_timer(struct kvm_vcpu *vcpu) { - handle_fastpath_preemption_timer(vcpu); + /* + * This non-fastpath handler is reached if and only if the preemption + * timer was being used to emulate a guest timer while L2 is active. + * All other scenarios are supposed to be handled in the fastpath. + */ + WARN_ON_ONCE(!is_guest_mode(vcpu)); + kvm_lapic_expired_hv_timer(vcpu); return 1; }
@@ -6834,11 +6858,27 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu) kvm_release_pfn_clean(pfn); }
-static void vmx_hwapic_isr_update(int max_isr) +static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr) { u16 status; u8 old;
+ /* + * If L2 is active, defer the SVI update until vmcs01 is loaded, as SVI + * is relevant if and only if Virtual Interrupt Delivery is + * enabled in vmcs12, and if VID is enabled then L2 EOIs affect L2's + * vAPIC, not L1's vAPIC. KVM must update vmcs01 on the next nested + * VM-Exit, otherwise L1 will run with a stale SVI. + */ + if (is_guest_mode(vcpu)) { + /* + * KVM is supposed to forward intercepted L2 EOIs to L1 if VID + * is enabled in vmcs12; as above, the EOIs affect L2's vAPIC. + */ + to_vmx(vcpu)->nested.update_vmcs01_hwapic_isr = true; + return; + } + if (max_isr == -1) max_isr = 0;
@@ -7175,13 +7215,13 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) msrs[i].host, false); }
-static void vmx_update_hv_timer(struct kvm_vcpu *vcpu) +static void vmx_update_hv_timer(struct kvm_vcpu *vcpu, bool force_immediate_exit) { struct vcpu_vmx *vmx = to_vmx(vcpu); u64 tscl; u32 delta_tsc;
- if (vmx->req_immediate_exit) { + if (force_immediate_exit) { vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, 0); vmx->loaded_vmcs->hv_timer_soft_disabled = false; } else if (vmx->hv_deadline_tsc != -1) { @@ -7234,13 +7274,22 @@ void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx, barrier_nospec(); }
-static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu) +static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu, + bool force_immediate_exit) { + /* + * If L2 is active, some VMX preemption timer exits can be handled in + * the fastpath even, all other exits must use the slow path. + */ + if (is_guest_mode(vcpu) && + to_vmx(vcpu)->exit_reason.basic != EXIT_REASON_PREEMPTION_TIMER) + return EXIT_FASTPATH_NONE; + switch (to_vmx(vcpu)->exit_reason.basic) { case EXIT_REASON_MSR_WRITE: return handle_fastpath_set_msr_irqoff(vcpu); case EXIT_REASON_PREEMPTION_TIMER: - return handle_fastpath_preemption_timer(vcpu); + return handle_fastpath_preemption_timer(vcpu, force_immediate_exit); default: return EXIT_FASTPATH_NONE; } @@ -7300,8 +7349,9 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, guest_state_exit_irqoff(); }
-static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) +static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags) { + bool force_immediate_exit = run_flags & KVM_RUN_FORCE_IMMEDIATE_EXIT; struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long cr3, cr4;
@@ -7327,7 +7377,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) return EXIT_FASTPATH_NONE; }
- trace_kvm_entry(vcpu); + trace_kvm_entry(vcpu, force_immediate_exit);
if (vmx->ple_window_dirty) { vmx->ple_window_dirty = false; @@ -7346,6 +7396,12 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); vcpu->arch.regs_dirty = 0;
+ if (run_flags & KVM_RUN_LOAD_GUEST_DR6) + set_debugreg(vcpu->arch.dr6, 6); + + if (run_flags & KVM_RUN_LOAD_DEBUGCTL) + vmx_reload_guest_debugctl(vcpu); + /* * Refresh vmcs.HOST_CR3 if necessary. This must be done immediately * prior to VM-Enter, as the kernel may load a new ASID (PCID) any time @@ -7382,7 +7438,9 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) vmx_passthrough_lbr_msrs(vcpu);
if (enable_preemption_timer) - vmx_update_hv_timer(vcpu); + vmx_update_hv_timer(vcpu, force_immediate_exit); + else if (force_immediate_exit) + smp_send_reschedule(vcpu->cpu);
kvm_wait_lapic_expire(vcpu);
@@ -7398,8 +7456,8 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) }
/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */ - if (vmx->host_debugctlmsr) - update_debugctlmsr(vmx->host_debugctlmsr); + if (vcpu->arch.host_debugctl) + update_debugctlmsr(vcpu->arch.host_debugctl);
#ifndef CONFIG_X86_64 /* @@ -7446,10 +7504,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) vmx_recover_nmi_blocking(vmx); vmx_complete_interrupts(vmx);
- if (is_guest_mode(vcpu)) - return EXIT_FASTPATH_NONE; - - return vmx_exit_handlers_fastpath(vcpu); + return vmx_exit_handlers_fastpath(vcpu, force_immediate_exit); }
static void vmx_vcpu_free(struct kvm_vcpu *vcpu) @@ -7948,11 +8003,6 @@ static __init void vmx_set_cpu_caps(void) kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG); }
-static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu) -{ - to_vmx(vcpu)->req_immediate_exit = true; -} - static int vmx_check_intercept_io(struct kvm_vcpu *vcpu, struct x86_instruction_info *info) { @@ -8279,6 +8329,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { .vcpu_load = vmx_vcpu_load, .vcpu_put = vmx_vcpu_put,
+ .HOST_OWNED_DEBUGCTL = DEBUGCTLMSR_FREEZE_IN_SMM, + .update_exception_bitmap = vmx_update_exception_bitmap, .get_msr_feature = vmx_get_msr_feature, .get_msr = vmx_get_msr, @@ -8297,7 +8349,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { .set_idt = vmx_set_idt, .get_gdt = vmx_get_gdt, .set_gdt = vmx_set_gdt, - .set_dr6 = vmx_set_dr6, .set_dr7 = vmx_set_dr7, .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs, .cache_reg = vmx_cache_reg, @@ -8364,8 +8415,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { .check_intercept = vmx_check_intercept, .handle_exit_irqoff = vmx_handle_exit_irqoff,
- .request_immediate_exit = vmx_request_immediate_exit, - .sched_in = vmx_sched_in,
.cpu_dirty_log_size = PML_ENTITY_NUM, @@ -8623,7 +8672,6 @@ static __init int hardware_setup(void) if (!enable_preemption_timer) { vmx_x86_ops.set_hv_timer = NULL; vmx_x86_ops.cancel_hv_timer = NULL; - vmx_x86_ops.request_immediate_exit = __kvm_request_immediate_exit; }
kvm_caps.supported_mce_cap |= MCG_LMCE_P; diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 6be1627d888e..5d73d3e570d7 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -177,6 +177,7 @@ struct nested_vmx { bool reload_vmcs01_apic_access_page; bool update_vmcs01_cpu_dirty_logging; bool update_vmcs01_apicv_status; + bool update_vmcs01_hwapic_isr;
/* * Enlightened VMCS has been enabled. It does not mean that L1 has to @@ -330,8 +331,6 @@ struct vcpu_vmx { unsigned int ple_window; bool ple_window_dirty;
- bool req_immediate_exit; - /* Support for PML */ #define PML_ENTITY_NUM 512 struct page *pml_pg; @@ -339,8 +338,6 @@ struct vcpu_vmx { /* apic deadline value in host tsc */ u64 hv_deadline_tsc;
- unsigned long host_debugctlmsr; - /* * Only bits masked by msr_ia32_feature_control_valid_bits can be set in * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included @@ -432,6 +429,32 @@ static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
+u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated); +bool vmx_is_valid_debugctl(struct kvm_vcpu *vcpu, u64 data, bool host_initiated); + +static inline void vmx_guest_debugctl_write(struct kvm_vcpu *vcpu, u64 val) +{ + WARN_ON_ONCE(val & DEBUGCTLMSR_FREEZE_IN_SMM); + + val |= vcpu->arch.host_debugctl & DEBUGCTLMSR_FREEZE_IN_SMM; + vmcs_write64(GUEST_IA32_DEBUGCTL, val); +} + +static inline u64 vmx_guest_debugctl_read(void) +{ + return vmcs_read64(GUEST_IA32_DEBUGCTL) & ~DEBUGCTLMSR_FREEZE_IN_SMM; +} + +static inline void vmx_reload_guest_debugctl(struct kvm_vcpu *vcpu) +{ + u64 val = vmcs_read64(GUEST_IA32_DEBUGCTL); + + if (!((val ^ vcpu->arch.host_debugctl) & DEBUGCTLMSR_FREEZE_IN_SMM)) + return; + + vmx_guest_debugctl_write(vcpu, val & ~DEBUGCTLMSR_FREEZE_IN_SMM); +} + /* * Note, early Intel manuals have the write-low and read-high bitmap offsets * the wrong way round. The bitmaps control MSRs 0x00000000-0x00001fff and diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 55185670e0e5..af0b2b3bc991 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -10505,12 +10505,6 @@ static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) static_call_cond(kvm_x86_set_apic_access_page_addr)(vcpu); }
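The vmx_guest_debugctl_*() helpers added above centralize how host-owned DEBUGCTL bits (currently only FREEZE_IN_SMM) are kept invisible to the guest while still being programmed into hardware. A rough standalone sketch of that masking scheme, using plain variables in place of the VMCS field and the real MSR (state and names simplified, not the KVM code):

#include <stdint.h>
#include <stdio.h>

#define DEBUGCTL_FREEZE_IN_SMM	(1ull << 14)	/* the host-owned bit */

static uint64_t host_debugctl;		/* snapshot of the host's MSR value */
static uint64_t hw_guest_debugctl;	/* stands in for vmcs.GUEST_IA32_DEBUGCTL */

/* Guest writes never carry the host-owned bit; it is stitched in here. */
static void guest_debugctl_write(uint64_t val)
{
	val |= host_debugctl & DEBUGCTL_FREEZE_IN_SMM;
	hw_guest_debugctl = val;
}

/* Reads hide the host-owned bit, so the guest sees only its own value. */
static uint64_t guest_debugctl_read(void)
{
	return hw_guest_debugctl & ~DEBUGCTL_FREEZE_IN_SMM;
}

/* Refresh the hardware value only if the host-owned bit changed. */
static void reload_guest_debugctl(void)
{
	uint64_t val = hw_guest_debugctl;

	if (!((val ^ host_debugctl) & DEBUGCTL_FREEZE_IN_SMM))
		return;
	guest_debugctl_write(val & ~DEBUGCTL_FREEZE_IN_SMM);
}

int main(void)
{
	host_debugctl = DEBUGCTL_FREEZE_IN_SMM;
	guest_debugctl_write(0x1);		/* guest enables e.g. LBR */
	printf("hw=%#llx guest view=%#llx\n",
	       (unsigned long long)hw_guest_debugctl,
	       (unsigned long long)guest_debugctl_read());

	host_debugctl = 0;			/* host bit cleared later */
	reload_guest_debugctl();
	printf("hw=%#llx after reload\n", (unsigned long long)hw_guest_debugctl);
	return 0;
}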
-void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu) -{ - smp_send_reschedule(vcpu->cpu); -} -EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit); - /* * Called within kvm->srcu read side. * Returns 1 to let vcpu_run() continue the guest execution loop without @@ -10524,6 +10518,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) dm_request_for_irq_injection(vcpu) && kvm_cpu_accept_dm_intr(vcpu); fastpath_t exit_fastpath; + u64 run_flags, debug_ctl;
bool req_immediate_exit = false;
@@ -10756,9 +10751,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) goto cancel_injection; }
+ run_flags = 0; if (req_immediate_exit) { + run_flags |= KVM_RUN_FORCE_IMMEDIATE_EXIT; kvm_make_request(KVM_REQ_EVENT, vcpu); - static_call(kvm_x86_request_immediate_exit)(vcpu); }
fpregs_assert_state_consistent(); @@ -10776,11 +10772,23 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) set_debugreg(vcpu->arch.eff_db[3], 3); /* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. */ if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) - static_call(kvm_x86_set_dr6)(vcpu, vcpu->arch.dr6); + run_flags |= KVM_RUN_LOAD_GUEST_DR6; } else if (unlikely(hw_breakpoint_active())) { set_debugreg(0, 7); }
+ /* + * Refresh the host DEBUGCTL snapshot after disabling IRQs, as DEBUGCTL + * can be modified in IRQ context, e.g. via SMP function calls. Inform + * vendor code if any host-owned bits were changed, e.g. so that the + * value loaded into hardware while running the guest can be updated. + */ + debug_ctl = get_debugctlmsr(); + if ((debug_ctl ^ vcpu->arch.host_debugctl) & kvm_x86_ops.HOST_OWNED_DEBUGCTL && + !vcpu->arch.guest_state_protected) + run_flags |= KVM_RUN_LOAD_DEBUGCTL; + vcpu->arch.host_debugctl = debug_ctl; + guest_timing_enter_irqoff();
for (;;) { @@ -10793,7 +10801,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) WARN_ON_ONCE((kvm_vcpu_apicv_activated(vcpu) != kvm_vcpu_apicv_active(vcpu)) && (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED));
- exit_fastpath = static_call(kvm_x86_vcpu_run)(vcpu); + exit_fastpath = static_call(kvm_x86_vcpu_run)(vcpu, run_flags); if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST)) break;
@@ -10805,6 +10813,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) break; }
+ run_flags = 0; + /* Note, VM-Exits that go down the "slow" path are accounted below. */ ++vcpu->stat.exits; } @@ -13256,16 +13266,22 @@ int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons, { struct kvm_kernel_irqfd *irqfd = container_of(cons, struct kvm_kernel_irqfd, consumer); + struct kvm *kvm = irqfd->kvm; int ret;
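Taken together, the x86.c changes above drop the dedicated request_immediate_exit() hook and set_dr6() call in favor of one-shot run flags that are rebuilt before every entry and handed to the vendor vcpu_run() hook. A simplified sketch of how such flags could be composed, including the XOR test that detects a change in host-owned DEBUGCTL bits (names and helpers are illustrative, not the actual KVM code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RUN_FORCE_IMMEDIATE_EXIT	(1ull << 0)
#define RUN_LOAD_GUEST_DR6		(1ull << 1)
#define RUN_LOAD_DEBUGCTL		(1ull << 2)

#define HOST_OWNED_DEBUGCTL		(1ull << 14)	/* FREEZE_IN_SMM */

/* Illustrative stand-in for the cached snapshot kept in vcpu->arch. */
static uint64_t cached_host_debugctl;

static uint64_t build_run_flags(bool req_immediate_exit, bool guest_owns_dr6,
				uint64_t current_host_debugctl)
{
	uint64_t run_flags = 0;

	if (req_immediate_exit)
		run_flags |= RUN_FORCE_IMMEDIATE_EXIT;
	if (guest_owns_dr6)
		run_flags |= RUN_LOAD_GUEST_DR6;

	/* Only a change in host-owned bits requires reloading guest DEBUGCTL. */
	if ((current_host_debugctl ^ cached_host_debugctl) & HOST_OWNED_DEBUGCTL)
		run_flags |= RUN_LOAD_DEBUGCTL;
	cached_host_debugctl = current_host_debugctl;

	return run_flags;
}

int main(void)
{
	/* First entry: immediate exit requested, host-owned bit just appeared. */
	printf("flags=%#llx\n",
	       (unsigned long long)build_run_flags(true, false, HOST_OWNED_DEBUGCTL));
	/* Next entry: nothing changed, so no flags are set. */
	printf("flags=%#llx\n",
	       (unsigned long long)build_run_flags(false, false, HOST_OWNED_DEBUGCTL));
	return 0;
}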
- irqfd->producer = prod; kvm_arch_start_assignment(irqfd->kvm); + + spin_lock_irq(&kvm->irqfds.lock); + irqfd->producer = prod; + ret = static_call(kvm_x86_pi_update_irte)(irqfd->kvm, prod->irq, irqfd->gsi, 1); - if (ret) kvm_arch_end_assignment(irqfd->kvm);
+ spin_unlock_irq(&kvm->irqfds.lock); + + return ret; }
@@ -13275,9 +13291,9 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, int ret; struct kvm_kernel_irqfd *irqfd = container_of(cons, struct kvm_kernel_irqfd, consumer); + struct kvm *kvm = irqfd->kvm;
WARN_ON(irqfd->producer != prod); - irqfd->producer = NULL;
/* * When producer of consumer is unregistered, we change back to @@ -13285,11 +13301,17 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, * when the irq is masked/disabled or the consumer side (KVM * int this case doesn't want to receive the interrupts. */ + spin_lock_irq(&kvm->irqfds.lock); + irqfd->producer = NULL; + ret = static_call(kvm_x86_pi_update_irte)(irqfd->kvm, prod->irq, irqfd->gsi, 0); if (ret) printk(KERN_INFO "irq bypass consumer (token %p) unregistration" " fails: %d\n", irqfd->consumer.token, ret);
+ spin_unlock_irq(&kvm->irqfds.lock); + + kvm_arch_end_assignment(irqfd->kvm); }
diff --git a/block/blk-core.c b/block/blk-core.c index 4f25d2c4bc70..923b7d91e6dc 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -775,6 +775,15 @@ void submit_bio_noacct(struct bio *bio) bio_clear_polled(bio);
switch (bio_op(bio)) { + case REQ_OP_READ: + case REQ_OP_WRITE: + break; + case REQ_OP_FLUSH: + /* + * REQ_OP_FLUSH can't be submitted through bios, it is only + * synthetized in struct request by the flush state machine. + */ + goto not_supported; case REQ_OP_DISCARD: if (!bdev_max_discard_sectors(bdev)) goto not_supported; @@ -788,6 +797,10 @@ void submit_bio_noacct(struct bio *bio) if (status != BLK_STS_OK) goto end_io; break; + case REQ_OP_WRITE_ZEROES: + if (!q->limits.max_write_zeroes_sectors) + goto not_supported; + break; case REQ_OP_ZONE_RESET: case REQ_OP_ZONE_OPEN: case REQ_OP_ZONE_CLOSE: @@ -799,12 +812,15 @@ void submit_bio_noacct(struct bio *bio) if (!bdev_is_zoned(bio->bi_bdev) || !blk_queue_zone_resetall(q)) goto not_supported; break; - case REQ_OP_WRITE_ZEROES: - if (!q->limits.max_write_zeroes_sectors) - goto not_supported; - break; + case REQ_OP_DRV_IN: + case REQ_OP_DRV_OUT: + /* + * Driver private operations are only used with passthrough + * requests. + */ + fallthrough; default: - break; + goto not_supported; }
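The reworked switch above turns submit_bio_noacct() into an explicit allowlist: reads and writes pass, conditionally supported operations are checked against the queue limits, and everything else, including flushes and driver-private operations that are never valid in a bio, is rejected by default. A compact sketch of the default-deny pattern with made-up operation names:

#include <stdbool.h>
#include <stdio.h>

enum op { OP_READ, OP_WRITE, OP_FLUSH, OP_DISCARD, OP_DRV_IN, OP_BOGUS = 42 };

/* Anything not explicitly allowed is rejected, so unknown ops fail safe. */
static int check_op(enum op op, bool supports_discard)
{
	switch (op) {
	case OP_READ:
	case OP_WRITE:
		break;			/* always allowed */
	case OP_DISCARD:
		if (!supports_discard)
			return -1;	/* device cannot do it */
		break;
	case OP_FLUSH:			/* never submitted this way */
	case OP_DRV_IN:			/* passthrough-only */
	default:
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("read: %d\n", check_op(OP_READ, false));
	printf("discard (unsupported): %d\n", check_op(OP_DISCARD, false));
	printf("bogus: %d\n", check_op(OP_BOGUS, true));
	return 0;
}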
if (blk_throtl_bio(bio)) diff --git a/block/blk-settings.c b/block/blk-settings.c index 7019b8e204d9..021994f6d2d8 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -634,7 +634,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, }
/* chunk_sectors a multiple of the physical block size? */ - if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) { + if (t->chunk_sectors % (t->physical_block_size >> SECTOR_SHIFT)) { t->chunk_sectors = 0; t->misaligned = 1; ret = -1; diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c index 7d1463a1562a..dd05faf00571 100644 --- a/crypto/jitterentropy-kcapi.c +++ b/crypto/jitterentropy-kcapi.c @@ -134,7 +134,7 @@ int jent_hash_time(void *hash_state, __u64 time, u8 *addtl, * Inject the data from the previous loop into the pool. This data is * not considered to contain any entropy, but it stirs the pool a bit. */ - ret = crypto_shash_update(desc, intermediary, sizeof(intermediary)); + ret = crypto_shash_update(hash_state_desc, intermediary, sizeof(intermediary)); if (ret) goto err;
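Returning to the blk_stack_limits() change a few hunks up: the old test ((chunk_sectors << 9) & (physical_block_size - 1)) implicitly assumes the physical block size is a power of two, while the new modulo form holds for any size. A small demonstration with hypothetical values (real physical block sizes are powers of two; the 3 KiB case below exists only to show where the bit-mask form breaks down):

#include <stdio.h>

#define SECTOR_SHIFT 9

static int misaligned_bitmask(unsigned int chunk_sectors, unsigned int pbs_bytes)
{
	return ((chunk_sectors << SECTOR_SHIFT) & (pbs_bytes - 1)) != 0;
}

static int misaligned_modulo(unsigned int chunk_sectors, unsigned int pbs_bytes)
{
	return (chunk_sectors % (pbs_bytes >> SECTOR_SHIFT)) != 0;
}

int main(void)
{
	/* Power-of-two physical block size: both forms agree (0, 0). */
	printf("4k pbs, 8-sector chunk: bitmask=%d modulo=%d\n",
	       misaligned_bitmask(8, 4096), misaligned_modulo(8, 4096));

	/* Hypothetical 3 KiB size: bitmask flags an aligned chunk (1 vs 0)... */
	printf("3k pbs, 6-sector chunk: bitmask=%d modulo=%d\n",
	       misaligned_bitmask(6, 3072), misaligned_modulo(6, 3072));

	/* ...and misses a misaligned one (0 vs 1). */
	printf("3k pbs, 8-sector chunk: bitmask=%d modulo=%d\n",
	       misaligned_bitmask(8, 3072), misaligned_modulo(8, 3072));
	return 0;
}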
@@ -147,11 +147,12 @@ int jent_hash_time(void *hash_state, __u64 time, u8 *addtl, * conditioning operation to have an identical amount of input data * according to section 3.1.5. */ - if (!stuck) { - ret = crypto_shash_update(hash_state_desc, (u8 *)&time, - sizeof(__u64)); + if (stuck) { + time = 0; }
+ ret = crypto_shash_update(hash_state_desc, (u8 *)&time, sizeof(__u64)); + err: shash_desc_zero(desc); memzero_explicit(intermediary, sizeof(intermediary)); diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index 7053f1b9fc1d..c0f9cf9768ea 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c @@ -250,7 +250,7 @@ static inline int acpi_processor_hotadd_init(struct acpi_processor *pr)
static int acpi_processor_get_info(struct acpi_device *device) { - union acpi_object object = { 0 }; + union acpi_object object = { .processor = { 0 } }; struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; struct acpi_processor *pr = acpi_driver_data(device); int device_declaration = 0; diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 2abf20736702..ec364c254112 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -715,6 +715,17 @@ static bool ghes_do_proc(struct ghes *ghes, } }
+ /* + * If no memory failure work is queued for abnormal synchronous + * errors, do a force kill. + */ + if (sync && !queued) { + dev_err(ghes->dev, + HW_ERR GHES_PFX "%s:%d: synchronous unrecoverable error (SIGBUS)\n", + current->comm, task_pid_nr(current)); + force_sig(SIGBUS); + } + return queued; }
@@ -901,6 +912,8 @@ static void __ghes_panic(struct ghes *ghes,
__ghes_print_estatus(KERN_EMERG, ghes->generic, estatus);
+ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK); + ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);
if (!panic_timeout) diff --git a/drivers/acpi/pfr_update.c b/drivers/acpi/pfr_update.c index 98267f163e2b..aedf7e40145e 100644 --- a/drivers/acpi/pfr_update.c +++ b/drivers/acpi/pfr_update.c @@ -310,7 +310,7 @@ static bool applicable_image(const void *data, struct pfru_update_cap_info *cap, if (type == PFRU_CODE_INJECT_TYPE) return payload_hdr->rt_ver >= cap->code_rt_version;
- return payload_hdr->rt_ver >= cap->drv_rt_version; + return payload_hdr->svn_ver >= cap->drv_svn; }
static void print_update_debug_info(struct pfru_updated_result *result, diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c index a34f7d37877c..eb8f2a1ce138 100644 --- a/drivers/acpi/prmt.c +++ b/drivers/acpi/prmt.c @@ -85,8 +85,6 @@ static u64 efi_pa_va_lookup(efi_guid_t *guid, u64 pa) } }
- pr_warn("Failed to find VA for GUID: %pUL, PA: 0x%llx", guid, pa); - return 0; }
@@ -154,13 +152,37 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end) guid_copy(&th->guid, (guid_t *)handler_info->handler_guid); th->handler_addr = (void *)efi_pa_va_lookup(&th->guid, handler_info->handler_address); + /* + * Print a warning message if handler_addr is zero which is not expected to + * ever happen. + */ + if (unlikely(!th->handler_addr)) + pr_warn("Failed to find VA of handler for GUID: %pUL, PA: 0x%llx", + &th->guid, handler_info->handler_address);
th->static_data_buffer_addr = efi_pa_va_lookup(&th->guid, handler_info->static_data_buffer_address); + /* + * According to the PRM specification, static_data_buffer_address can be zero, + * so avoid printing a warning message in that case. Otherwise, if the + * return value of efi_pa_va_lookup() is zero, print the message. + */ + if (unlikely(!th->static_data_buffer_addr && handler_info->static_data_buffer_address)) + pr_warn("Failed to find VA of static data buffer for GUID: %pUL, PA: 0x%llx", + &th->guid, handler_info->static_data_buffer_address);
th->acpi_param_buffer_addr = efi_pa_va_lookup(&th->guid, handler_info->acpi_param_buffer_address);
+ /* + * According to the PRM specification, acpi_param_buffer_address can be zero, + * so avoid printing a warning message in that case. Otherwise, if the + * return value of efi_pa_va_lookup() is zero, print the message. + */ + if (unlikely(!th->acpi_param_buffer_addr && handler_info->acpi_param_buffer_address)) + pr_warn("Failed to find VA of acpi param buffer for GUID: %pUL, PA: 0x%llx", + &th->guid, handler_info->acpi_param_buffer_address); + } while (++cur_handler < tm->handler_count && (handler_info = get_next_handler(handler_info)));
return 0; diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 4265814c74f8..d81f30ce2341 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -174,6 +174,9 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy) { unsigned int cpu;
+ if (ignore_ppc == 1) + return; + for_each_cpu(cpu, policy->related_cpus) { struct acpi_processor *pr = per_cpu(processors, cpu); int ret; @@ -194,6 +197,14 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy) if (ret < 0) pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu, ret); + + if (!pr->performance) + continue; + + ret = acpi_processor_get_platform_limit(pr); + if (ret) + pr_err("Failed to update freq constraint for CPU%d (%d)\n", + cpu, ret); } }
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 42b51c9812a0..188707d2970e 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -117,7 +117,7 @@ config SATA_AHCI
config SATA_MOBILE_LPM_POLICY int "Default SATA Link Power Management policy for low power chipsets" - range 0 4 + range 0 5 default 0 depends on SATA_AHCI help @@ -126,15 +126,32 @@ config SATA_MOBILE_LPM_POLICY chipsets are typically found on most laptops but desktops and servers now also widely use chipsets supporting low power modes.
- The value set has the following meanings: + Each policy combines power saving states and features: + - Partial: The Phy logic is powered but is in a reduced power + state. The exit latency from this state is no longer than + 10us). + - Slumber: The Phy logic is powered but is in an even lower power + state. The exit latency from this state is potentially + longer, but no longer than 10ms. + - DevSleep: The Phy logic may be powered down. The exit latency from + this state is no longer than 20 ms, unless otherwise + specified by DETO in the device Identify Device Data log. + - HIPM: Host Initiated Power Management (host automatically + transitions to partial and slumber). + - DIPM: Device Initiated Power Management (device automatically + transitions to partial and slumber). + + The possible values for the default SATA link power management + policies are: 0 => Keep firmware settings - 1 => Maximum performance - 2 => Medium power - 3 => Medium power with Device Initiated PM enabled - 4 => Minimum power - - Note "Minimum power" is known to cause issues, including disk - corruption, with some disks and should not be used. + 1 => No power savings (maximum performance) + 2 => HIPM (Partial) + 3 => HIPM (Partial) and DIPM (Partial and Slumber) + 4 => HIPM (Partial and DevSleep) and DIPM (Partial and Slumber) + 5 => HIPM (Slumber and DevSleep) and DIPM (Partial and Slumber) + + Excluding the value 0, higher values represent policies with higher + power savings.
config SATA_AHCI_PLATFORM tristate "Platform AHCI SATA support" diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c index be72030a500d..9e0a820d6961 100644 --- a/drivers/ata/libata-sata.c +++ b/drivers/ata/libata-sata.c @@ -817,6 +817,11 @@ static ssize_t ata_scsi_lpm_store(struct device *device,
spin_lock_irqsave(ap->lock, flags);
+ if (ap->flags & ATA_FLAG_NO_LPM) { + count = -EOPNOTSUPP; + goto out_unlock; + } + ata_for_each_link(link, ap, EDGE) { ata_for_each_dev(dev, &ap->link, ENABLED) { if (dev->horkage & ATA_HORKAGE_NOLPM) { diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 6a1460d35447..0b2f1e269ca4 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -856,18 +856,14 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, {0xFF, 0xFF, 0xFF, 0xFF}, // END mark }; static const unsigned char stat_table[][4] = { - /* Must be first because BUSY means no other bits valid */ - {0x80, ABORTED_COMMAND, 0x47, 0x00}, - // Busy, fake parity for now - {0x40, ILLEGAL_REQUEST, 0x21, 0x04}, - // Device ready, unaligned write command - {0x20, HARDWARE_ERROR, 0x44, 0x00}, - // Device fault, internal target failure - {0x08, ABORTED_COMMAND, 0x47, 0x00}, - // Timed out in xfer, fake parity for now - {0x04, RECOVERED_ERROR, 0x11, 0x00}, - // Recovered ECC error Medium error, recovered - {0xFF, 0xFF, 0xFF, 0xFF}, // END mark + /* Busy: must be first because BUSY means no other bits valid */ + { ATA_BUSY, ABORTED_COMMAND, 0x00, 0x00 }, + /* Device fault: INTERNAL TARGET FAILURE */ + { ATA_DF, HARDWARE_ERROR, 0x44, 0x00 }, + /* Corrected data error */ + { ATA_CORR, RECOVERED_ERROR, 0x00, 0x00 }, + + { 0xFF, 0xFF, 0xFF, 0xFF }, /* END mark */ };
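The trimmed stat_table keeps libata's table-driven translation: walk the entries until the END mark and take the first one whose status bit is set, which is why BUSY has to stay first. A self-contained sketch of that lookup using the same bit and sense-key values as the new table (illustrative, not the kernel code):

#include <stdint.h>
#include <stdio.h>

/*
 * Same layout as libata's stat_table: { status bit, sense key, asc, ascq },
 * terminated by an END mark. The lookup is only a sketch of what
 * ata_to_sense_error() does with it.
 */
static const unsigned char stat_table[][4] = {
	{ 0x80, 0x0b, 0x00, 0x00 },	/* ATA_BUSY -> ABORTED COMMAND */
	{ 0x20, 0x04, 0x44, 0x00 },	/* ATA_DF   -> HARDWARE ERROR  */
	{ 0x04, 0x01, 0x00, 0x00 },	/* ATA_CORR -> RECOVERED ERROR */
	{ 0xFF, 0xFF, 0xFF, 0xFF },	/* END mark */
};

static int translate_status(uint8_t status, uint8_t *sk, uint8_t *asc, uint8_t *ascq)
{
	for (int i = 0; stat_table[i][0] != 0xFF; i++) {
		if (status & stat_table[i][0]) {
			*sk   = stat_table[i][1];
			*asc  = stat_table[i][2];
			*ascq = stat_table[i][3];
			return 0;
		}
	}
	return -1;	/* caller falls back to ABORTED COMMAND */
}

int main(void)
{
	uint8_t sk, asc, ascq;

	/* BUSY and CORR both set: BUSY wins because it is listed first. */
	if (!translate_status(0x80 | 0x04, &sk, &asc, &ascq))
		printf("sense key %#x, asc %#x, ascq %#x\n", sk, asc, ascq);
	return 0;
}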
/* @@ -939,6 +935,8 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) { ata_dev_dbg(dev, "missing result TF: can't generate ATA PT sense data\n"); + if (qc->err_mask) + ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0); return; }
@@ -996,8 +994,8 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) { ata_dev_dbg(dev, - "missing result TF: can't generate sense data\n"); - return; + "Missing result TF: reporting aborted command\n"); + goto aborted; }
/* Use ata_to_sense_error() to map status register bits @@ -1008,19 +1006,20 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc) ata_to_sense_error(qc->ap->print_id, tf->status, tf->error, &sense_key, &asc, &ascq); ata_scsi_set_sense(dev, cmd, sense_key, asc, ascq); - } else { - /* Could not decode error */ - ata_dev_warn(dev, "could not decode error status 0x%x err_mask 0x%x\n", - tf->status, qc->err_mask); - ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0); - return; - }
- block = ata_tf_read_block(&qc->result_tf, dev); - if (block == U64_MAX) + block = ata_tf_read_block(&qc->result_tf, dev); + if (block != U64_MAX) + scsi_set_sense_information(sb, SCSI_SENSE_BUFFERSIZE, + block); return; + }
- scsi_set_sense_information(sb, SCSI_SENSE_BUFFERSIZE, block); + /* Could not decode error */ + ata_dev_warn(dev, + "Could not decode error 0x%x, status 0x%x (err_mask=0x%x)\n", + tf->error, tf->status, qc->err_mask); +aborted: + ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0); }
void ata_scsi_sdev_config(struct scsi_device *sdev) @@ -3786,21 +3785,16 @@ static int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc, /* Check cdl_ctrl */ switch (buf[0] & 0x03) { case 0: - /* Disable CDL if it is enabled */ - if (!(dev->flags & ATA_DFLAG_CDL_ENABLED)) - return 0; + /* Disable CDL */ ata_dev_dbg(dev, "Disabling CDL\n"); cdl_action = 0; dev->flags &= ~ATA_DFLAG_CDL_ENABLED; break; case 0x02: /* - * Enable CDL if not already enabled. Since this is mutually - * exclusive with NCQ priority, allow this only if NCQ priority - * is disabled. + * Enable CDL. Since CDL is mutually exclusive with NCQ + * priority, allow this only if NCQ priority is disabled. */ - if (dev->flags & ATA_DFLAG_CDL_ENABLED) - return 0; if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) { ata_dev_err(dev, "NCQ priority must be disabled to enable CDL\n"); diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index b741b5ba82bd..2aa0c6425290 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -19,6 +19,7 @@ #include <linux/init.h> #include <linux/rcupdate.h> #include <linux/sched.h> +#include <linux/units.h>
#define CREATE_TRACE_POINTS #include <trace/events/thermal_pressure.h> @@ -26,7 +27,8 @@ static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data); static struct cpumask scale_freq_counters_mask; static bool scale_freq_invariant; -static DEFINE_PER_CPU(u32, freq_factor) = 1; +DEFINE_PER_CPU(unsigned long, capacity_freq_ref) = 1; +EXPORT_PER_CPU_SYMBOL_GPL(capacity_freq_ref);
static bool supports_scale_freq_counters(const struct cpumask *cpus) { @@ -170,9 +172,9 @@ DEFINE_PER_CPU(unsigned long, thermal_pressure); * operating on stale data when hot-plug is used for some CPUs. The * @capped_freq reflects the currently allowed max CPUs frequency due to * thermal capping. It might be also a boost frequency value, which is bigger - * than the internal 'freq_factor' max frequency. In such case the pressure - * value should simply be removed, since this is an indication that there is - * no thermal throttling. The @capped_freq must be provided in kHz. + * than the internal 'capacity_freq_ref' max frequency. In such case the + * pressure value should simply be removed, since this is an indication that + * there is no thermal throttling. The @capped_freq must be provided in kHz. */ void topology_update_thermal_pressure(const struct cpumask *cpus, unsigned long capped_freq) @@ -183,10 +185,7 @@ void topology_update_thermal_pressure(const struct cpumask *cpus,
cpu = cpumask_first(cpus); max_capacity = arch_scale_cpu_capacity(cpu); - max_freq = per_cpu(freq_factor, cpu); - - /* Convert to MHz scale which is used in 'freq_factor' */ - capped_freq /= 1000; + max_freq = arch_scale_freq_ref(cpu);
/* * Handle properly the boost frequencies, which should simply clean @@ -279,13 +278,13 @@ void topology_normalize_cpu_scale(void)
capacity_scale = 1; for_each_possible_cpu(cpu) { - capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu); + capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu); capacity_scale = max(capacity, capacity_scale); }
pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale); for_each_possible_cpu(cpu) { - capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu); + capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu); capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT, capacity_scale); topology_set_cpu_scale(cpu, capacity); @@ -321,15 +320,15 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) cpu_node, raw_capacity[cpu]);
/* - * Update freq_factor for calculating early boot cpu capacities. + * Update capacity_freq_ref for calculating early boot CPU capacities. * For non-clk CPU DVFS mechanism, there's no way to get the * frequency value now, assuming they are running at the same - * frequency (by keeping the initial freq_factor value). + * frequency (by keeping the initial capacity_freq_ref value). */ cpu_clk = of_clk_get(cpu_node, 0); if (!PTR_ERR_OR_ZERO(cpu_clk)) { - per_cpu(freq_factor, cpu) = - clk_get_rate(cpu_clk) / 1000; + per_cpu(capacity_freq_ref, cpu) = + clk_get_rate(cpu_clk) / HZ_PER_KHZ; clk_put(cpu_clk); } } else { @@ -345,11 +344,16 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) return !ret; }
+void __weak freq_inv_set_max_ratio(int cpu, u64 max_rate) +{ +} + #ifdef CONFIG_ACPI_CPPC_LIB #include <acpi/cppc_acpi.h>
void topology_init_cpu_capacity_cppc(void) { + u64 capacity, capacity_scale = 0; struct cppc_perf_caps perf_caps; int cpu;
@@ -366,6 +370,10 @@ void topology_init_cpu_capacity_cppc(void) (perf_caps.highest_perf >= perf_caps.nominal_perf) && (perf_caps.highest_perf >= perf_caps.lowest_perf)) { raw_capacity[cpu] = perf_caps.highest_perf; + capacity_scale = max_t(u64, capacity_scale, raw_capacity[cpu]); + + per_cpu(capacity_freq_ref, cpu) = cppc_perf_to_khz(&perf_caps, raw_capacity[cpu]); + pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n", cpu, raw_capacity[cpu]); continue; @@ -376,7 +384,18 @@ void topology_init_cpu_capacity_cppc(void) goto exit; }
- topology_normalize_cpu_scale(); + for_each_possible_cpu(cpu) { + freq_inv_set_max_ratio(cpu, + per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ); + + capacity = raw_capacity[cpu]; + capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT, + capacity_scale); + topology_set_cpu_scale(cpu, capacity); + pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n", + cpu, topology_get_cpu_scale(cpu)); + } + schedule_work(&update_topology_flags_work); pr_debug("cpu_capacity: cpu_capacity initialization done\n");
@@ -398,9 +417,6 @@ init_cpu_capacity_callback(struct notifier_block *nb, struct cpufreq_policy *policy = data; int cpu;
- if (!raw_capacity) - return 0; - if (val != CPUFREQ_CREATE_POLICY) return 0;
@@ -410,13 +426,18 @@ init_cpu_capacity_callback(struct notifier_block *nb,
cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);
- for_each_cpu(cpu, policy->related_cpus) - per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000; + for_each_cpu(cpu, policy->related_cpus) { + per_cpu(capacity_freq_ref, cpu) = policy->cpuinfo.max_freq; + freq_inv_set_max_ratio(cpu, + per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ); + }
if (cpumask_empty(cpus_to_visit)) { - topology_normalize_cpu_scale(); - schedule_work(&update_topology_flags_work); - free_raw_capacity(); + if (raw_capacity) { + topology_normalize_cpu_scale(); + schedule_work(&update_topology_flags_work); + free_raw_capacity(); + } pr_debug("cpu_capacity: parsing done\n"); schedule_work(&parsing_done_work); } @@ -436,7 +457,7 @@ static int __init register_cpufreq_notifier(void) * On ACPI-based systems skip registering cpufreq notifier as cpufreq * information is not needed for cpu capacity initialization. */ - if (!acpi_disabled || !raw_capacity) + if (!acpi_disabled) return -EINVAL;
if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 0d43bf5b6cec..f53c14fb74fd 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -1175,16 +1175,18 @@ int __pm_runtime_resume(struct device *dev, int rpmflags) EXPORT_SYMBOL_GPL(__pm_runtime_resume);
/** - * pm_runtime_get_if_active - Conditionally bump up device usage counter. + * pm_runtime_get_conditional - Conditionally bump up device usage counter. * @dev: Device to handle. * @ign_usage_count: Whether or not to look at the current usage counter value. * * Return -EINVAL if runtime PM is disabled for @dev. * - * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either - * @ign_usage_count is %true or the runtime PM usage counter of @dev is not - * zero, increment the usage counter of @dev and return 1. Otherwise, return 0 - * without changing the usage counter. + * Otherwise, if its runtime PM status is %RPM_ACTIVE and (1) @ign_usage_count + * is set, or (2) @dev is not ignoring children and its active child count is + * nonzero, or (3) the runtime PM usage counter of @dev is not zero, increment + * the usage counter of @dev and return 1. + * + * Otherwise, return 0 without changing the usage counter. * * If @ign_usage_count is %true, this function can be used to prevent suspending * the device when its runtime PM status is %RPM_ACTIVE. @@ -1196,7 +1198,7 @@ EXPORT_SYMBOL_GPL(__pm_runtime_resume); * The caller is responsible for decrementing the runtime PM usage counter of * @dev after this function has returned a positive value for it. */ -int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count) +static int pm_runtime_get_conditional(struct device *dev, bool ign_usage_count) { unsigned long flags; int retval; @@ -1206,7 +1208,8 @@ int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count) retval = -EINVAL; } else if (dev->power.runtime_status != RPM_ACTIVE) { retval = 0; - } else if (ign_usage_count) { + } else if (ign_usage_count || (!dev->power.ignore_children && + atomic_read(&dev->power.child_count) > 0)) { retval = 1; atomic_inc(&dev->power.usage_count); } else { @@ -1217,8 +1220,45 @@ int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
return retval; } + +/** + * pm_runtime_get_if_active - Bump up runtime PM usage counter if the device is + * in active state + * @dev: Target device. + * + * Increment the runtime PM usage counter of @dev if its runtime PM status is + * %RPM_ACTIVE, in which case it returns 1. If the device is in a different + * state, 0 is returned. -EINVAL is returned if runtime PM is disabled for the + * device, in which case also the usage_count will remain unmodified. + */ +int pm_runtime_get_if_active(struct device *dev) +{ + return pm_runtime_get_conditional(dev, true); +} EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
+/** + * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter. + * @dev: Target device. + * + * Increment the runtime PM usage counter of @dev if its runtime PM status is + * %RPM_ACTIVE and its runtime PM usage counter is greater than 0 or it is not + * ignoring children and its active child count is nonzero. 1 is returned in + * this case. + * + * If @dev is in a different state or it is not in use (that is, its usage + * counter is 0, or it is ignoring children, or its active child count is 0), + * 0 is returned. + * + * -EINVAL is returned if runtime PM is disabled for the device, in which case + * also the usage counter of @dev is not updated. + */ +int pm_runtime_get_if_in_use(struct device *dev) +{ + return pm_runtime_get_conditional(dev, false); +} +EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use); + /** * __pm_runtime_set_status - Set runtime PM status of a device. * @dev: Device to handle. @@ -1754,6 +1794,11 @@ void pm_runtime_reinit(struct device *dev) pm_runtime_put(dev->parent); } } + /* + * Clear power.needs_force_resume in case it has been set by + * pm_runtime_force_suspend() invoked from a driver remove callback. + */ + dev->power.needs_force_resume = false; }
/** diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 0c9f54197768..ac18d36b0ea8 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -2500,7 +2500,11 @@ static int handle_write_conflicts(struct drbd_device *device, peer_req->w.cb = superseded ? e_send_superseded : e_send_retry_write; list_add_tail(&peer_req->w.list, &device->done_ee); - queue_work(connection->ack_sender, &peer_req->peer_device->send_acks_work); + /* put is in drbd_send_acks_wf() */ + kref_get(&device->kref); + if (!queue_work(connection->ack_sender, + &peer_req->peer_device->send_acks_work)) + kref_put(&device->kref, drbd_destroy_device);
err = -ENOENT; goto out; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 455e2a2b149f..ed004e1610dd 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1472,19 +1472,36 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg) return error; }
-static int loop_set_block_size(struct loop_device *lo, unsigned long arg) +static int loop_set_block_size(struct loop_device *lo, blk_mode_t mode, + struct block_device *bdev, unsigned long arg) { int err = 0;
- if (lo->lo_state != Lo_bound) - return -ENXIO; + /* + * If we don't hold exclusive handle for the device, upgrade to it + * here to avoid changing device under exclusive owner. + */ + if (!(mode & BLK_OPEN_EXCL)) { + err = bd_prepare_to_claim(bdev, loop_set_block_size, NULL); + if (err) + return err; + } + + err = mutex_lock_killable(&lo->lo_mutex); + if (err) + goto abort_claim; + + if (lo->lo_state != Lo_bound) { + err = -ENXIO; + goto unlock; + }
err = blk_validate_block_size(arg); if (err) return err;
if (lo->lo_queue->limits.logical_block_size == arg) - return 0; + goto unlock;
sync_blockdev(lo->lo_device); invalidate_bdev(lo->lo_device); @@ -1496,6 +1513,11 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg) loop_update_dio(lo); blk_mq_unfreeze_queue(lo->lo_queue);
+unlock: + mutex_unlock(&lo->lo_mutex); +abort_claim: + if (!(mode & BLK_OPEN_EXCL)) + bd_abort_claiming(bdev, loop_set_block_size); return err; }
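[Editor's note: an illustrative userspace sketch of the ioctl path reworked above; with this change the kernel claims the block device internally when the caller does not already hold it exclusively. Device path and block size are assumptions, not part of the patch.]

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/loop.h>

	int main(void)
	{
		int fd = open("/dev/loop0", O_RDWR);	/* assumes a bound loop device */

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (ioctl(fd, LOOP_SET_BLOCK_SIZE, 4096UL) < 0)
			perror("LOOP_SET_BLOCK_SIZE");
		close(fd);
		return 0;
	}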
@@ -1514,9 +1536,6 @@ static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd, case LOOP_SET_DIRECT_IO: err = loop_set_dio(lo, arg); break; - case LOOP_SET_BLOCK_SIZE: - err = loop_set_block_size(lo, arg); - break; default: err = -EINVAL; } @@ -1571,9 +1590,12 @@ static int lo_ioctl(struct block_device *bdev, blk_mode_t mode, break; case LOOP_GET_STATUS64: return loop_get_status64(lo, argp); + case LOOP_SET_BLOCK_SIZE: + if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN)) + return -EPERM; + return loop_set_block_size(lo, mode, bdev, arg); case LOOP_SET_CAPACITY: case LOOP_SET_DIRECT_IO: - case LOOP_SET_BLOCK_SIZE: if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN)) return -EPERM; fallthrough; diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index 7bf4b48e2282..a379a37c9449 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c @@ -956,8 +956,10 @@ static bool vdc_port_mpgroup_check(struct vio_dev *vdev) dev = device_find_child(vdev->dev.parent, &port_data, vdc_device_probed);
- if (dev) + if (dev) { + put_device(dev); return true; + }
return false; } diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c index edc0ec5a0933..022571cfec5c 100644 --- a/drivers/bus/mhi/host/boot.c +++ b/drivers/bus/mhi/host/boot.c @@ -31,8 +31,8 @@ int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, int ret;
for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) { - bhi_vec->dma_addr = mhi_buf->dma_addr; - bhi_vec->size = mhi_buf->len; + bhi_vec->dma_addr = cpu_to_le64(mhi_buf->dma_addr); + bhi_vec->size = cpu_to_le64(mhi_buf->len); }
dev_dbg(dev, "BHIe programming for RDDM\n"); @@ -375,8 +375,8 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl, while (remainder) { to_cpy = min(remainder, mhi_buf->len); memcpy(mhi_buf->buf, buf, to_cpy); - bhi_vec->dma_addr = mhi_buf->dma_addr; - bhi_vec->size = to_cpy; + bhi_vec->dma_addr = cpu_to_le64(mhi_buf->dma_addr); + bhi_vec->size = cpu_to_le64(to_cpy);
buf += to_cpy; remainder -= to_cpy; diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h index d2858236af52..88c9bc11f171 100644 --- a/drivers/bus/mhi/host/internal.h +++ b/drivers/bus/mhi/host/internal.h @@ -31,8 +31,8 @@ struct mhi_ctxt { };
struct bhi_vec_entry { - u64 dma_addr; - u64 size; + __le64 dma_addr; + __le64 size; };
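[Editor's note: the switch to __le64 makes the BHI vector table explicitly little-endian regardless of host byte order; a minimal kernel-style sketch of the pattern, with a hypothetical structure and helper.]

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct fw_desc {			/* device expects little-endian fields */
		__le64 dma_addr;
		__le64 size;
	};

	static void fill_desc(struct fw_desc *d, u64 addr, u64 len)
	{
		/* cpu_to_le64() is a no-op on LE hosts and a byte swap on BE hosts */
		d->dma_addr = cpu_to_le64(addr);
		d->size = cpu_to_le64(len);
	}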
enum mhi_ch_state_type { diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c index ad1e97222a0f..196929fff243 100644 --- a/drivers/bus/mhi/host/main.c +++ b/drivers/bus/mhi/host/main.c @@ -603,7 +603,7 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl, { dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event); struct mhi_ring_element *local_rp, *ev_tre; - void *dev_rp; + void *dev_rp, *next_rp; struct mhi_buf_info *buf_info; u16 xfer_len;
@@ -622,6 +622,16 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl, result.dir = mhi_chan->dir;
local_rp = tre_ring->rp; + + next_rp = local_rp + 1; + if (next_rp >= tre_ring->base + tre_ring->len) + next_rp = tre_ring->base; + if (dev_rp != next_rp && !MHI_TRE_DATA_GET_CHAIN(local_rp)) { + dev_err(&mhi_cntrl->mhi_dev->dev, + "Event element points to an unexpected TRE\n"); + break; + } + while (local_rp != dev_rp) { buf_info = buf_ring->rp; /* If it's the last TRE, get length from the event */ diff --git a/drivers/cdx/controller/cdx_rpmsg.c b/drivers/cdx/controller/cdx_rpmsg.c index 04b578a0be17..61f1a290ff08 100644 --- a/drivers/cdx/controller/cdx_rpmsg.c +++ b/drivers/cdx/controller/cdx_rpmsg.c @@ -129,8 +129,7 @@ static int cdx_rpmsg_probe(struct rpmsg_device *rpdev)
chinfo.src = RPMSG_ADDR_ANY; chinfo.dst = rpdev->dst; - strscpy(chinfo.name, cdx_rpmsg_id_table[0].name, - strlen(cdx_rpmsg_id_table[0].name)); + strscpy(chinfo.name, cdx_rpmsg_id_table[0].name, sizeof(chinfo.name));
cdx_mcdi->ept = rpmsg_create_ept(rpdev, cdx_rpmsg_cb, NULL, chinfo); if (!cdx_mcdi->ept) { diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index db8f1dadaa9f..96f175bd6d9f 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -4618,10 +4618,10 @@ static int handle_one_recv_msg(struct ipmi_smi *intf, * The NetFN and Command in the response is not even * marginally correct. */ - dev_warn(intf->si_dev, - "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n", - (msg->data[0] >> 2) | 1, msg->data[1], - msg->rsp[0] >> 2, msg->rsp[1]); + dev_warn_ratelimited(intf->si_dev, + "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n", + (msg->data[0] >> 2) | 1, msg->data[1], + msg->rsp[0] >> 2, msg->rsp[1]);
goto return_unspecified; } diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 9a459257489f..ca149ca8ccd6 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c @@ -1190,14 +1190,8 @@ static struct ipmi_smi_watcher smi_watcher = { .smi_gone = ipmi_smi_gone };
-static int action_op(const char *inval, char *outval) +static int action_op_set_val(const char *inval) { - if (outval) - strcpy(outval, action); - - if (!inval) - return 0; - if (strcmp(inval, "reset") == 0) action_val = WDOG_TIMEOUT_RESET; else if (strcmp(inval, "none") == 0) @@ -1208,18 +1202,26 @@ static int action_op(const char *inval, char *outval) action_val = WDOG_TIMEOUT_POWER_DOWN; else return -EINVAL; - strcpy(action, inval); return 0; }
-static int preaction_op(const char *inval, char *outval) +static int action_op(const char *inval, char *outval) { + int rv; + if (outval) - strcpy(outval, preaction); + strcpy(outval, action);
if (!inval) return 0; + rv = action_op_set_val(inval); + if (!rv) + strcpy(action, inval); + return rv; +}
+static int preaction_op_set_val(const char *inval) +{ if (strcmp(inval, "pre_none") == 0) preaction_val = WDOG_PRETIMEOUT_NONE; else if (strcmp(inval, "pre_smi") == 0) @@ -1232,18 +1234,26 @@ static int preaction_op(const char *inval, char *outval) preaction_val = WDOG_PRETIMEOUT_MSG_INT; else return -EINVAL; - strcpy(preaction, inval); return 0; }
-static int preop_op(const char *inval, char *outval) +static int preaction_op(const char *inval, char *outval) { + int rv; + if (outval) - strcpy(outval, preop); + strcpy(outval, preaction);
if (!inval) return 0; + rv = preaction_op_set_val(inval); + if (!rv) + strcpy(preaction, inval); + return 0; +}
+static int preop_op_set_val(const char *inval) +{ if (strcmp(inval, "preop_none") == 0) preop_val = WDOG_PREOP_NONE; else if (strcmp(inval, "preop_panic") == 0) @@ -1252,7 +1262,22 @@ static int preop_op(const char *inval, char *outval) preop_val = WDOG_PREOP_GIVE_DATA; else return -EINVAL; - strcpy(preop, inval); + return 0; +} + +static int preop_op(const char *inval, char *outval) +{ + int rv; + + if (outval) + strcpy(outval, preop); + + if (!inval) + return 0; + + rv = preop_op_set_val(inval); + if (!rv) + strcpy(preop, inval); return 0; }
@@ -1289,18 +1314,18 @@ static int __init ipmi_wdog_init(void) { int rv;
- if (action_op(action, NULL)) { + if (action_op_set_val(action)) { action_op("reset", NULL); pr_info("Unknown action '%s', defaulting to reset\n", action); }
- if (preaction_op(preaction, NULL)) { + if (preaction_op_set_val(preaction)) { preaction_op("pre_none", NULL); pr_info("Unknown preaction '%s', defaulting to none\n", preaction); }
- if (preop_op(preop, NULL)) { + if (preop_op_set_val(preop)) { preop_op("preop_none", NULL); pr_info("Unknown preop '%s', defaulting to none\n", preop); } diff --git a/drivers/char/misc.c b/drivers/char/misc.c index dda466f9181a..30178e20d962 100644 --- a/drivers/char/misc.c +++ b/drivers/char/misc.c @@ -314,8 +314,8 @@ static int __init misc_init(void) if (err) goto fail_remove;
- err = -EIO; - if (__register_chrdev(MISC_MAJOR, 0, MINORMASK + 1, "misc", &misc_fops)) + err = __register_chrdev(MISC_MAJOR, 0, MINORMASK + 1, "misc", &misc_fops); + if (err < 0) goto fail_printk; return 0;
diff --git a/drivers/clk/qcom/gcc-ipq5018.c b/drivers/clk/qcom/gcc-ipq5018.c index 3136ba1c2a59..915e84db3c97 100644 --- a/drivers/clk/qcom/gcc-ipq5018.c +++ b/drivers/clk/qcom/gcc-ipq5018.c @@ -1370,7 +1370,7 @@ static struct clk_branch gcc_xo_clk = { &gcc_xo_clk_src.clkr.hw, }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, + .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, .ops = &clk_branch2_ops, }, }, diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c index 0626650a7011..c9fc52a36fce 100644 --- a/drivers/clk/tegra/clk-periph.c +++ b/drivers/clk/tegra/clk-periph.c @@ -51,7 +51,7 @@ static int clk_periph_determine_rate(struct clk_hw *hw, struct tegra_clk_periph *periph = to_clk_periph(hw); const struct clk_ops *div_ops = periph->div_ops; struct clk_hw *div_hw = &periph->divider.hw; - unsigned long rate; + long rate;
__clk_hw_set_clk(div_hw, hw);
@@ -59,7 +59,7 @@ static int clk_periph_determine_rate(struct clk_hw *hw, if (rate < 0) return rate;
- req->rate = rate; + req->rate = (unsigned long)rate; return 0; }
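[Editor's note: the type change above matters because a negative error code stored in an unsigned long can never fail a "rate < 0" check; a standalone illustration in ordinary C, not kernel code.]

	#include <stdio.h>

	static long fake_round_rate(void)
	{
		return -22;			/* e.g. -EINVAL from a ->round_rate() hook */
	}

	int main(void)
	{
		unsigned long urate = fake_round_rate();	/* -22 wraps to a huge positive value */
		long srate = fake_round_rate();

		printf("as unsigned long: %lu (error code lost)\n", urate);
		printf("as long:          %ld (error still detectable: %d)\n", srate, srate < 0);
		return 0;
	}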
diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c index e4d62cdaff46..1f12f7ea3dc4 100644 --- a/drivers/comedi/comedi_fops.c +++ b/drivers/comedi/comedi_fops.c @@ -787,6 +787,7 @@ static int is_device_busy(struct comedi_device *dev) struct comedi_subdevice *s; int i;
+ lockdep_assert_held_write(&dev->attach_lock); lockdep_assert_held(&dev->mutex); if (!dev->attached) return 0; @@ -795,7 +796,16 @@ static int is_device_busy(struct comedi_device *dev) s = &dev->subdevices[i]; if (s->busy) return 1; - if (s->async && comedi_buf_is_mmapped(s)) + if (!s->async) + continue; + if (comedi_buf_is_mmapped(s)) + return 1; + /* + * There may be tasks still waiting on the subdevice's wait + * queue, although they should already be about to be removed + * from it since the subdevice has no active async command. + */ + if (wq_has_sleeper(&s->async->wait_head)) return 1; }
@@ -825,15 +835,22 @@ static int do_devconfig_ioctl(struct comedi_device *dev, return -EPERM;
if (!arg) { - if (is_device_busy(dev)) - return -EBUSY; + int rc = 0; + if (dev->attached) { - struct module *driver_module = dev->driver->module; + down_write(&dev->attach_lock); + if (is_device_busy(dev)) { + rc = -EBUSY; + } else { + struct module *driver_module = + dev->driver->module;
- comedi_device_detach(dev); - module_put(driver_module); + comedi_device_detach_locked(dev); + module_put(driver_module); + } + up_write(&dev->attach_lock); } - return 0; + return rc; }
if (copy_from_user(&it, arg, sizeof(it))) @@ -1570,6 +1587,9 @@ static int do_insnlist_ioctl(struct comedi_device *dev, memset(&data[n], 0, (MIN_SAMPLES - n) * sizeof(unsigned int)); } + } else { + memset(data, 0, max_t(unsigned int, n, MIN_SAMPLES) * + sizeof(unsigned int)); } ret = parse_insn(dev, insns + i, data, file); if (ret < 0) @@ -1653,6 +1673,8 @@ static int do_insn_ioctl(struct comedi_device *dev, memset(&data[insn->n], 0, (MIN_SAMPLES - insn->n) * sizeof(unsigned int)); } + } else { + memset(data, 0, n_data * sizeof(unsigned int)); } ret = parse_insn(dev, insn, data, file); if (ret < 0) diff --git a/drivers/comedi/comedi_internal.h b/drivers/comedi/comedi_internal.h index 9b3631a654c8..cf10ba016ebc 100644 --- a/drivers/comedi/comedi_internal.h +++ b/drivers/comedi/comedi_internal.h @@ -50,6 +50,7 @@ extern struct mutex comedi_drivers_list_lock; int insn_inval(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data);
+void comedi_device_detach_locked(struct comedi_device *dev); void comedi_device_detach(struct comedi_device *dev); int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it); diff --git a/drivers/comedi/drivers.c b/drivers/comedi/drivers.c index 086213bcc499..ce4cde140518 100644 --- a/drivers/comedi/drivers.c +++ b/drivers/comedi/drivers.c @@ -158,7 +158,7 @@ static void comedi_device_detach_cleanup(struct comedi_device *dev) int i; struct comedi_subdevice *s;
- lockdep_assert_held(&dev->attach_lock); + lockdep_assert_held_write(&dev->attach_lock); lockdep_assert_held(&dev->mutex); if (dev->subdevices) { for (i = 0; i < dev->n_subdevices; i++) { @@ -195,16 +195,23 @@ static void comedi_device_detach_cleanup(struct comedi_device *dev) comedi_clear_hw_dev(dev); }
-void comedi_device_detach(struct comedi_device *dev) +void comedi_device_detach_locked(struct comedi_device *dev) { + lockdep_assert_held_write(&dev->attach_lock); lockdep_assert_held(&dev->mutex); comedi_device_cancel_all(dev); - down_write(&dev->attach_lock); dev->attached = false; dev->detach_count++; if (dev->driver) dev->driver->detach(dev); comedi_device_detach_cleanup(dev); +} + +void comedi_device_detach(struct comedi_device *dev) +{ + lockdep_assert_held(&dev->mutex); + down_write(&dev->attach_lock); + comedi_device_detach_locked(dev); up_write(&dev->attach_lock); }
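[Editor's note: the comedi change above follows the common locked/unlocked helper split; a generic sketch of that pattern with hypothetical names.]

	#include <linux/rwsem.h>
	#include <linux/lockdep.h>

	struct foo {
		struct rw_semaphore lock;
		/* ... protected state ... */
	};

	/* Callers that already hold the lock use the *_locked() variant. */
	static void foo_teardown_locked(struct foo *f)
	{
		lockdep_assert_held_write(&f->lock);
		/* ... tear down state ... */
	}

	static void foo_teardown(struct foo *f)
	{
		down_write(&f->lock);
		foo_teardown_locked(f);
		up_write(&f->lock);
	}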
@@ -612,11 +619,9 @@ static int insn_rw_emulate_bits(struct comedi_device *dev, unsigned int chan = CR_CHAN(insn->chanspec); unsigned int base_chan = (chan < 32) ? 0 : chan; unsigned int _data[2]; + unsigned int i; int ret;
- if (insn->n == 0) - return 0; - memset(_data, 0, sizeof(_data)); memset(&_insn, 0, sizeof(_insn)); _insn.insn = INSN_BITS; @@ -627,18 +632,21 @@ static int insn_rw_emulate_bits(struct comedi_device *dev, if (insn->insn == INSN_WRITE) { if (!(s->subdev_flags & SDF_WRITABLE)) return -EINVAL; - _data[0] = 1U << (chan - base_chan); /* mask */ - _data[1] = data[0] ? (1U << (chan - base_chan)) : 0; /* bits */ + _data[0] = 1U << (chan - base_chan); /* mask */ } + for (i = 0; i < insn->n; i++) { + if (insn->insn == INSN_WRITE) + _data[1] = data[i] ? _data[0] : 0; /* bits */
- ret = s->insn_bits(dev, s, &_insn, _data); - if (ret < 0) - return ret; + ret = s->insn_bits(dev, s, &_insn, _data); + if (ret < 0) + return ret;
- if (insn->insn == INSN_READ) - data[0] = (_data[1] >> (chan - base_chan)) & 1; + if (insn->insn == INSN_READ) + data[i] = (_data[1] >> (chan - base_chan)) & 1; + }
- return 1; + return insn->n; }
static int __comedi_device_postconfig_async(struct comedi_device *dev, diff --git a/drivers/comedi/drivers/pcl726.c b/drivers/comedi/drivers/pcl726.c index 0430630e6ebb..b542896fa0e4 100644 --- a/drivers/comedi/drivers/pcl726.c +++ b/drivers/comedi/drivers/pcl726.c @@ -328,7 +328,8 @@ static int pcl726_attach(struct comedi_device *dev, * Hook up the external trigger source interrupt only if the * user config option is valid and the board supports interrupts. */ - if (it->options[1] && (board->irq_mask & (1 << it->options[1]))) { + if (it->options[1] > 0 && it->options[1] < 16 && + (board->irq_mask & (1U << it->options[1]))) { ret = request_irq(it->options[1], pcl726_interrupt, 0, dev->board_name, dev); if (ret == 0) { diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c index 8afefdea4d80..8a01032e57fa 100644 --- a/drivers/cpufreq/armada-8k-cpufreq.c +++ b/drivers/cpufreq/armada-8k-cpufreq.c @@ -103,7 +103,7 @@ static void armada_8k_cpufreq_free_table(struct freq_table *freq_tables) { int opps_index, nb_cpus = num_possible_cpus();
- for (opps_index = 0 ; opps_index <= nb_cpus; opps_index++) { + for (opps_index = 0 ; opps_index < nb_cpus; opps_index++) { int i;
/* If cpu_dev is NULL then we reached the end of the array */ diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index aa34af940cb5..ea32bdf7cc24 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -847,7 +847,7 @@ static struct freq_attr *cppc_cpufreq_attr[] = { };
static struct cpufreq_driver cppc_cpufreq_driver = { - .flags = CPUFREQ_CONST_LOOPS, + .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS, .verify = cppc_verify_policy, .target = cppc_cpufreq_set_target, .get = cppc_cpufreq_get_rate, diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index cc98d8cf5433..30d8f2ada0f1 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -454,7 +454,7 @@ void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
arch_set_freq_scale(policy->related_cpus, policy->cur, - policy->cpuinfo.max_freq); + arch_scale_freq_ref(policy->cpu));
spin_lock(&policy->transition_lock); policy->transition_ongoing = false; @@ -2205,7 +2205,7 @@ unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
policy->cur = freq; arch_set_freq_scale(policy->related_cpus, freq, - policy->cpuinfo.max_freq); + arch_scale_freq_ref(policy->cpu)); cpufreq_stats_record_transition(policy, freq);
if (trace_cpu_frequency_enabled()) { @@ -2701,10 +2701,12 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, pr_debug("starting governor %s failed\n", policy->governor->name); if (old_gov) { policy->governor = old_gov; - if (cpufreq_init_governor(policy)) + if (cpufreq_init_governor(policy)) { policy->governor = NULL; - else - cpufreq_start_governor(policy); + } else if (cpufreq_start_governor(policy)) { + cpufreq_exit_governor(policy); + policy->governor = NULL; + } }
return ret; diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index edd9a8fb9878..92f9c00ad5f9 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -21,7 +21,7 @@
#include "gov.h"
-#define BUCKETS 12 +#define BUCKETS 6 #define INTERVAL_SHIFT 3 #define INTERVALS (1UL << INTERVAL_SHIFT) #define RESOLUTION 1024 @@ -31,12 +31,11 @@ /* * Concepts and ideas behind the menu governor * - * For the menu governor, there are 3 decision factors for picking a C + * For the menu governor, there are 2 decision factors for picking a C * state: * 1) Energy break even point - * 2) Performance impact - * 3) Latency tolerance (from pmqos infrastructure) - * These three factors are treated independently. + * 2) Latency tolerance (from pmqos infrastructure) + * These two factors are treated independently. * * Energy break even point * ----------------------- @@ -119,19 +118,10 @@ struct menu_device { int interval_ptr; };
-static inline int which_bucket(u64 duration_ns, unsigned int nr_iowaiters) +static inline int which_bucket(u64 duration_ns) { int bucket = 0;
- /* - * We keep two groups of stats; one with no - * IO pending, one without. - * This allows us to calculate - * E(duration)|iowait - */ - if (nr_iowaiters) - bucket = BUCKETS/2; - if (duration_ns < 10ULL * NSEC_PER_USEC) return bucket; if (duration_ns < 100ULL * NSEC_PER_USEC) @@ -145,21 +135,16 @@ static inline int which_bucket(u64 duration_ns, unsigned int nr_iowaiters) return bucket + 5; }
-/* - * Return a multiplier for the exit latency that is intended - * to take performance requirements into account. - * The more performance critical we estimate the system - * to be, the higher this multiplier, and thus the higher - * the barrier to go to an expensive C state. - */ -static inline int performance_multiplier(unsigned int nr_iowaiters) +static DEFINE_PER_CPU(struct menu_device, menu_devices); + +static void menu_update_intervals(struct menu_device *data, unsigned int interval_us) { - /* for IO wait tasks (per cpu!) we add 10x each */ - return 1 + 10 * nr_iowaiters; + /* Update the repeating-pattern data. */ + data->intervals[data->interval_ptr++] = interval_us; + if (data->interval_ptr >= INTERVALS) + data->interval_ptr = 0; }
-static DEFINE_PER_CPU(struct menu_device, menu_devices); - static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
/* @@ -276,18 +261,22 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, struct menu_device *data = this_cpu_ptr(&menu_devices); s64 latency_req = cpuidle_governor_latency_req(dev->cpu); u64 predicted_ns; - u64 interactivity_req; - unsigned int nr_iowaiters; ktime_t delta, delta_tick; int i, idx;
if (data->needs_update) { menu_update(drv, dev); data->needs_update = 0; + } else if (!dev->last_residency_ns) { + /* + * This happens when the driver rejects the previously selected + * idle state and returns an error, so update the recent + * intervals table to prevent invalid information from being + * used going forward. + */ + menu_update_intervals(data, UINT_MAX); }
- nr_iowaiters = nr_iowait_cpu(dev->cpu); - /* Find the shortest expected idle interval. */ predicted_ns = get_typical_interval(data) * NSEC_PER_USEC; if (predicted_ns > RESIDENCY_THRESHOLD_NS) { @@ -301,7 +290,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, }
data->next_timer_ns = delta; - data->bucket = which_bucket(data->next_timer_ns, nr_iowaiters); + data->bucket = which_bucket(data->next_timer_ns);
/* Round up the result for half microseconds. */ timer_us = div_u64((RESOLUTION * DECAY * NSEC_PER_USEC) / 2 + @@ -319,7 +308,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, */ data->next_timer_ns = KTIME_MAX; delta_tick = TICK_NSEC / 2; - data->bucket = which_bucket(KTIME_MAX, nr_iowaiters); + data->bucket = which_bucket(KTIME_MAX); }
if (unlikely(drv->state_count <= 1 || latency_req == 0) || @@ -335,27 +324,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, return 0; }
- if (tick_nohz_tick_stopped()) { - /* - * If the tick is already stopped, the cost of possible short - * idle duration misprediction is much higher, because the CPU - * may be stuck in a shallow idle state for a long time as a - * result of it. In that case say we might mispredict and use - * the known time till the closest timer event for the idle - * state selection. - */ - if (predicted_ns < TICK_NSEC) - predicted_ns = data->next_timer_ns; - } else { - /* - * Use the performance multiplier and the user-configurable - * latency_req to determine the maximum exit latency. - */ - interactivity_req = div64_u64(predicted_ns, - performance_multiplier(nr_iowaiters)); - if (latency_req > interactivity_req) - latency_req = interactivity_req; - } + /* + * If the tick is already stopped, the cost of possible short idle + * duration misprediction is much higher, because the CPU may be stuck + * in a shallow idle state for a long time as a result of it. In that + * case, say we might mispredict and use the known time till the closest + * timer event for the idle state selection. + */ + if (tick_nohz_tick_stopped() && predicted_ns < TICK_NSEC) + predicted_ns = data->next_timer_ns;
/* * Find the idle state with the lowest power while satisfying @@ -371,13 +348,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, if (idx == -1) idx = i; /* first enabled state */
+ if (s->exit_latency_ns > latency_req) + break; + if (s->target_residency_ns > predicted_ns) { /* * Use a physical idle state, not busy polling, unless * a timer is going to trigger soon enough. */ if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) && - s->exit_latency_ns <= latency_req && s->target_residency_ns <= data->next_timer_ns) { predicted_ns = s->target_residency_ns; idx = i; @@ -409,8 +388,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
return idx; } - if (s->exit_latency_ns > latency_req) - break;
idx = i; } @@ -553,10 +530,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->correction_factor[data->bucket] = new_factor;
- /* update the repeating-pattern data */ - data->intervals[data->interval_ptr++] = ktime_to_us(measured_ns); - if (data->interval_ptr >= INTERVALS) - data->interval_ptr = 0; + menu_update_intervals(data, ktime_to_us(measured_ns)); }
/** diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index 9a1c61be32cc..059319f7a716 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -1482,11 +1482,13 @@ static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp) if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld)) atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+ /* Do unmap before data processing */ + hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src); + p = sg_virt(areq->dst); memmove(p, p + ctx->key_sz - curve_sz, curve_sz); memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);
- hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src); kpp_request_complete(areq, ret);
atomic64_inc(&dfx[HPRE_RECV_CNT].value); @@ -1796,9 +1798,11 @@ static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp) if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld)) atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+ /* Do unmap before data processing */ + hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src); + hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);
- hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src); kpp_request_complete(areq, ret);
atomic64_inc(&dfx[HPRE_RECV_CNT].value); diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index 79ff7982378d..05d1402001a1 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -193,6 +193,7 @@ void adf_exit_misc_wq(void); bool adf_misc_wq_queue_work(struct work_struct *work); bool adf_misc_wq_queue_delayed_work(struct delayed_work *work, unsigned long delay); +void adf_misc_wq_flush(void); #if defined(CONFIG_PCI_IOV) int adf_sriov_configure(struct pci_dev *pdev, int numvfs); void adf_disable_sriov(struct adf_accel_dev *accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c index 0f9e2d59ce38..9c1b587a87e5 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_init.c +++ b/drivers/crypto/intel/qat/qat_common/adf_init.c @@ -381,6 +381,7 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev) hw_data->exit_admin_comms(accel_dev);
adf_cleanup_etr_data(accel_dev); + adf_misc_wq_flush(); adf_dev_restore(accel_dev); }
diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c index 2aba194a7c29..ce7c9ef6346b 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_isr.c +++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c @@ -386,3 +386,8 @@ bool adf_misc_wq_queue_delayed_work(struct delayed_work *work, { return queue_delayed_work(adf_misc_wq, work, delay); } + +void adf_misc_wq_flush(void) +{ + flush_workqueue(adf_misc_wq); +} diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs.c b/drivers/crypto/intel/qat/qat_common/qat_algs.c index 3c4bba4a8779..d69cc1e5e023 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_algs.c +++ b/drivers/crypto/intel/qat/qat_common/qat_algs.c @@ -1277,7 +1277,7 @@ static struct aead_alg qat_aeads[] = { { .base = { .cra_name = "authenc(hmac(sha1),cbc(aes))", .cra_driver_name = "qat_aes_cbc_hmac_sha1", - .cra_priority = 4001, + .cra_priority = 100, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), @@ -1294,7 +1294,7 @@ static struct aead_alg qat_aeads[] = { { .base = { .cra_name = "authenc(hmac(sha256),cbc(aes))", .cra_driver_name = "qat_aes_cbc_hmac_sha256", - .cra_priority = 4001, + .cra_priority = 100, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), @@ -1311,7 +1311,7 @@ static struct aead_alg qat_aeads[] = { { .base = { .cra_name = "authenc(hmac(sha512),cbc(aes))", .cra_driver_name = "qat_aes_cbc_hmac_sha512", - .cra_priority = 4001, + .cra_priority = 100, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), @@ -1329,7 +1329,7 @@ static struct aead_alg qat_aeads[] = { { static struct skcipher_alg qat_skciphers[] = { { .base.cra_name = "cbc(aes)", .base.cra_driver_name = "qat_aes_cbc", - .base.cra_priority = 4001, + .base.cra_priority = 100, .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx), @@ -1347,7 +1347,7 @@ static struct skcipher_alg qat_skciphers[] = { { }, { .base.cra_name = "ctr(aes)", .base.cra_driver_name = "qat_aes_ctr", - .base.cra_priority = 4001, + .base.cra_priority = 100, .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx), @@ -1365,7 +1365,7 @@ static struct skcipher_alg qat_skciphers[] = { { }, { .base.cra_name = "xts(aes)", .base.cra_driver_name = "qat_aes_xts", - .base.cra_priority = 4001, + .base.cra_priority = 100, .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c index 1958b797a421..682e7d80adb8 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c @@ -1485,6 +1485,7 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf) dma_addr_t rptr_baddr; struct pci_dev *pdev; u32 len, compl_rlen; + int timeout = 10000; int ret, etype; void *rptr;
@@ -1547,16 +1548,27 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf) etype); otx2_cpt_fill_inst(&inst, &iq_cmd, rptr_baddr); lfs->ops->send_cmd(&inst, 1, &cptpf->lfs.lf[0]); + timeout = 10000;
while (lfs->ops->cpt_get_compcode(result) == - OTX2_CPT_COMPLETION_CODE_INIT) + OTX2_CPT_COMPLETION_CODE_INIT) { cpu_relax(); + udelay(1); + timeout--; + if (!timeout) { + ret = -ENODEV; + cptpf->is_eng_caps_discovered = false; + dev_warn(&pdev->dev, "Timeout on CPT load_fvc completion poll\n"); + goto error_no_response; + } + }
cptpf->eng_caps[etype].u = be64_to_cpup(rptr); } - dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL); cptpf->is_eng_caps_discovered = true;
+error_no_response: + dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL); free_result: kfree(result); lf_cleanup: diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c index d69672ccacc4..8d057cea09d5 100644 --- a/drivers/devfreq/governor_userspace.c +++ b/drivers/devfreq/governor_userspace.c @@ -9,6 +9,7 @@ #include <linux/slab.h> #include <linux/device.h> #include <linux/devfreq.h> +#include <linux/kstrtox.h> #include <linux/pm.h> #include <linux/mutex.h> #include <linux/module.h> @@ -39,10 +40,13 @@ static ssize_t set_freq_store(struct device *dev, struct device_attribute *attr, unsigned long wanted; int err = 0;
+ err = kstrtoul(buf, 0, &wanted); + if (err) + return err; + mutex_lock(&devfreq->lock); data = devfreq->governor_data;
- sscanf(buf, "%lu", &wanted); data->user_frequency = wanted; data->valid = true; err = update_devfreq(devfreq); diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index 9840594a6aaa..3882080cffa6 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c @@ -745,7 +745,7 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr) /* cyclic while CIRC/DBM disable => post resume reconfiguration needed */ if (!(scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM))) stm32_dma_post_resume_reconfigure(chan); - else if (scr & STM32_DMA_SCR_DBM) + else if (scr & STM32_DMA_SCR_DBM && chan->desc->num_sgs > 2) stm32_dma_configure_next_sg(chan); } else { chan->busy = false; diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c index 6ddc90d7ba7c..f8aaada42d3f 100644 --- a/drivers/edac/synopsys_edac.c +++ b/drivers/edac/synopsys_edac.c @@ -332,20 +332,26 @@ struct synps_edac_priv { #endif };
+enum synps_platform_type { + ZYNQ, + ZYNQMP, + SYNPS, +}; + /** * struct synps_platform_data - synps platform data structure. + * @platform: Identifies the target hardware platform * @get_error_info: Get EDAC error info. * @get_mtype: Get mtype. * @get_dtype: Get dtype. - * @get_ecc_state: Get ECC state. * @get_mem_info: Get EDAC memory info * @quirks: To differentiate IPs. */ struct synps_platform_data { + enum synps_platform_type platform; int (*get_error_info)(struct synps_edac_priv *priv); enum mem_type (*get_mtype)(const void __iomem *base); enum dev_type (*get_dtype)(const void __iomem *base); - bool (*get_ecc_state)(void __iomem *base); #ifdef CONFIG_EDAC_DEBUG u64 (*get_mem_info)(struct synps_edac_priv *priv); #endif @@ -720,51 +726,38 @@ static enum dev_type zynqmp_get_dtype(const void __iomem *base) return dt; }
-/** - * zynq_get_ecc_state - Return the controller ECC enable/disable status. - * @base: DDR memory controller base address. - * - * Get the ECC enable/disable status of the controller. - * - * Return: true if enabled, otherwise false. - */ -static bool zynq_get_ecc_state(void __iomem *base) +static bool get_ecc_state(struct synps_edac_priv *priv) { + u32 ecctype, clearval; enum dev_type dt; - u32 ecctype; - - dt = zynq_get_dtype(base); - if (dt == DEV_UNKNOWN) - return false;
- ecctype = readl(base + SCRUB_OFST) & SCRUB_MODE_MASK; - if ((ecctype == SCRUB_MODE_SECDED) && (dt == DEV_X2)) - return true; - - return false; -} - -/** - * zynqmp_get_ecc_state - Return the controller ECC enable/disable status. - * @base: DDR memory controller base address. - * - * Get the ECC enable/disable status for the controller. - * - * Return: a ECC status boolean i.e true/false - enabled/disabled. - */ -static bool zynqmp_get_ecc_state(void __iomem *base) -{ - enum dev_type dt; - u32 ecctype; - - dt = zynqmp_get_dtype(base); - if (dt == DEV_UNKNOWN) - return false; - - ecctype = readl(base + ECC_CFG0_OFST) & SCRUB_MODE_MASK; - if ((ecctype == SCRUB_MODE_SECDED) && - ((dt == DEV_X2) || (dt == DEV_X4) || (dt == DEV_X8))) - return true; + if (priv->p_data->platform == ZYNQ) { + dt = zynq_get_dtype(priv->baseaddr); + if (dt == DEV_UNKNOWN) + return false; + + ecctype = readl(priv->baseaddr + SCRUB_OFST) & SCRUB_MODE_MASK; + if (ecctype == SCRUB_MODE_SECDED && dt == DEV_X2) { + clearval = ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_UE_ERR; + writel(clearval, priv->baseaddr + ECC_CTRL_OFST); + writel(0x0, priv->baseaddr + ECC_CTRL_OFST); + return true; + } + } else { + dt = zynqmp_get_dtype(priv->baseaddr); + if (dt == DEV_UNKNOWN) + return false; + + ecctype = readl(priv->baseaddr + ECC_CFG0_OFST) & SCRUB_MODE_MASK; + if (ecctype == SCRUB_MODE_SECDED && + (dt == DEV_X2 || dt == DEV_X4 || dt == DEV_X8)) { + clearval = readl(priv->baseaddr + ECC_CLR_OFST) | + ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT | + ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT; + writel(clearval, priv->baseaddr + ECC_CLR_OFST); + return true; + } + }
return false; } @@ -934,18 +927,18 @@ static int setup_irq(struct mem_ctl_info *mci, }
static const struct synps_platform_data zynq_edac_def = { + .platform = ZYNQ, .get_error_info = zynq_get_error_info, .get_mtype = zynq_get_mtype, .get_dtype = zynq_get_dtype, - .get_ecc_state = zynq_get_ecc_state, .quirks = 0, };
static const struct synps_platform_data zynqmp_edac_def = { + .platform = ZYNQMP, .get_error_info = zynqmp_get_error_info, .get_mtype = zynqmp_get_mtype, .get_dtype = zynqmp_get_dtype, - .get_ecc_state = zynqmp_get_ecc_state, #ifdef CONFIG_EDAC_DEBUG .get_mem_info = zynqmp_get_mem_info, #endif @@ -957,10 +950,10 @@ static const struct synps_platform_data zynqmp_edac_def = { };
static const struct synps_platform_data synopsys_edac_def = { + .platform = SYNPS, .get_error_info = zynqmp_get_error_info, .get_mtype = zynqmp_get_mtype, .get_dtype = zynqmp_get_dtype, - .get_ecc_state = zynqmp_get_ecc_state, .quirks = (DDR_ECC_INTR_SUPPORT | DDR_ECC_INTR_SELF_CLEAR #ifdef CONFIG_EDAC_DEBUG | DDR_ECC_DATA_POISON_SUPPORT @@ -1392,10 +1385,6 @@ static int mc_probe(struct platform_device *pdev) if (!p_data) return -ENODEV;
- if (!p_data->get_ecc_state(baseaddr)) { - edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n"); - return -ENXIO; - }
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; layers[0].size = SYNPS_EDAC_NR_CSROWS; @@ -1415,6 +1404,12 @@ static int mc_probe(struct platform_device *pdev) priv = mci->pvt_info; priv->baseaddr = baseaddr; priv->p_data = p_data; + if (!get_ecc_state(priv)) { + edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n"); + rc = -ENODEV; + goto free_edac_mc; + } + spin_lock_init(&priv->reglock);
mc_init(mci, pdev); diff --git a/drivers/firmware/tegra/Kconfig b/drivers/firmware/tegra/Kconfig index cde1ab8bd9d1..91f2320c0d0f 100644 --- a/drivers/firmware/tegra/Kconfig +++ b/drivers/firmware/tegra/Kconfig @@ -2,7 +2,7 @@ menu "Tegra firmware driver"
config TEGRA_IVC - bool "Tegra IVC protocol" + bool "Tegra IVC protocol" if COMPILE_TEST depends on ARCH_TEGRA help IVC (Inter-VM Communication) protocol is part of the IPC @@ -13,8 +13,9 @@ config TEGRA_IVC
config TEGRA_BPMP bool "Tegra BPMP driver" - depends on ARCH_TEGRA && TEGRA_HSP_MBOX && TEGRA_IVC + depends on ARCH_TEGRA && TEGRA_HSP_MBOX depends on !CPU_BIG_ENDIAN + select TEGRA_IVC help BPMP (Boot and Power Management Processor) is designed to off-loading the PM functions which include clock/DVFS/thermal/power from the CPU. diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c index 96611d424a10..b77097c93973 100644 --- a/drivers/fpga/zynq-fpga.c +++ b/drivers/fpga/zynq-fpga.c @@ -405,12 +405,12 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt) } }
- priv->dma_nelms = - dma_map_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE); - if (priv->dma_nelms == 0) { + err = dma_map_sgtable(mgr->dev.parent, sgt, DMA_TO_DEVICE, 0); + if (err) { dev_err(&mgr->dev, "Unable to DMA map (TO_DEVICE)\n"); - return -ENOMEM; + return err; } + priv->dma_nelms = sgt->nents;
/* enable clock */ err = clk_enable(priv->clk); @@ -478,7 +478,7 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt) clk_disable(priv->clk);
out_free: - dma_unmap_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE); + dma_unmap_sgtable(mgr->dev.parent, sgt, DMA_TO_DEVICE, 0); return err; }
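[Editor's note: unlike dma_map_sg(), dma_map_sgtable() returns 0 or a negative errno and records the mapped entry count in the sg_table itself; a brief kernel-style sketch of the pattern adopted above, with a hypothetical caller.]

	#include <linux/dma-mapping.h>

	static int foo_dma_transfer(struct device *dev, struct sg_table *sgt)
	{
		int err;

		err = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
		if (err)
			return err;		/* negative errno, nothing mapped to undo */

		/* ... program the DMA engine using sgt->sgl and sgt->nents ... */

		dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
		return 0;
	}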
diff --git a/drivers/gpio/gpio-mlxbf2.c b/drivers/gpio/gpio-mlxbf2.c index 6abe01bc39c3..c03945af8538 100644 --- a/drivers/gpio/gpio-mlxbf2.c +++ b/drivers/gpio/gpio-mlxbf2.c @@ -397,7 +397,7 @@ mlxbf2_gpio_probe(struct platform_device *pdev) gc->ngpio = npins; gc->owner = THIS_MODULE;
- irq = platform_get_irq(pdev, 0); + irq = platform_get_irq_optional(pdev, 0); if (irq >= 0) { girq = &gs->gc.irq; gpio_irq_chip_set_chip(girq, &mlxbf2_gpio_irq_chip); diff --git a/drivers/gpio/gpio-mlxbf3.c b/drivers/gpio/gpio-mlxbf3.c index 9875e34bde72..ed29b07d16c1 100644 --- a/drivers/gpio/gpio-mlxbf3.c +++ b/drivers/gpio/gpio-mlxbf3.c @@ -190,9 +190,7 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev) struct mlxbf3_gpio_context *gs; struct gpio_irq_chip *girq; struct gpio_chip *gc; - char *colon_ptr; int ret, irq; - long num;
gs = devm_kzalloc(dev, sizeof(*gs), GFP_KERNEL); if (!gs) @@ -229,39 +227,25 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev) gc->owner = THIS_MODULE; gc->add_pin_ranges = mlxbf3_gpio_add_pin_ranges;
- colon_ptr = strchr(dev_name(dev), ':'); - if (!colon_ptr) { - dev_err(dev, "invalid device name format\n"); - return -EINVAL; - } - - ret = kstrtol(++colon_ptr, 16, &num); - if (ret) { - dev_err(dev, "invalid device instance\n"); - return ret; - } - - if (!num) { - irq = platform_get_irq(pdev, 0); - if (irq >= 0) { - girq = &gs->gc.irq; - gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip); - girq->default_type = IRQ_TYPE_NONE; - /* This will let us handle the parent IRQ in the driver */ - girq->num_parents = 0; - girq->parents = NULL; - girq->parent_handler = NULL; - girq->handler = handle_bad_irq; - - /* - * Directly request the irq here instead of passing - * a flow-handler because the irq is shared. - */ - ret = devm_request_irq(dev, irq, mlxbf3_gpio_irq_handler, - IRQF_SHARED, dev_name(dev), gs); - if (ret) - return dev_err_probe(dev, ret, "failed to request IRQ"); - } + irq = platform_get_irq_optional(pdev, 0); + if (irq >= 0) { + girq = &gs->gc.irq; + gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip); + girq->default_type = IRQ_TYPE_NONE; + /* This will let us handle the parent IRQ in the driver */ + girq->num_parents = 0; + girq->parents = NULL; + girq->parent_handler = NULL; + girq->handler = handle_bad_irq; + + /* + * Directly request the irq here instead of passing + * a flow-handler because the irq is shared. + */ + ret = devm_request_irq(dev, irq, mlxbf3_gpio_irq_handler, + IRQF_SHARED, dev_name(dev), gs); + if (ret) + return dev_err_probe(dev, ret, "failed to request IRQ"); }
platform_set_drvdata(pdev, gs); diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c index fab771cb6a87..bac757c191c2 100644 --- a/drivers/gpio/gpio-tps65912.c +++ b/drivers/gpio/gpio-tps65912.c @@ -49,10 +49,13 @@ static int tps65912_gpio_direction_output(struct gpio_chip *gc, unsigned offset, int value) { struct tps65912_gpio *gpio = gpiochip_get_data(gc); + int ret;
/* Set the initial value */ - regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset, - GPIO_SET_MASK, value ? GPIO_SET_MASK : 0); + ret = regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset, + GPIO_SET_MASK, value ? GPIO_SET_MASK : 0); + if (ret) + return ret;
return regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset, GPIO_CFG_MASK, GPIO_CFG_MASK); diff --git a/drivers/gpio/gpio-virtio.c b/drivers/gpio/gpio-virtio.c index fcc5e8c08973..1d0eb49eae3b 100644 --- a/drivers/gpio/gpio-virtio.c +++ b/drivers/gpio/gpio-virtio.c @@ -539,7 +539,6 @@ static const char **virtio_gpio_get_names(struct virtio_gpio *vgpio,
static int virtio_gpio_probe(struct virtio_device *vdev) { - struct virtio_gpio_config config; struct device *dev = &vdev->dev; struct virtio_gpio *vgpio; u32 gpio_names_size; @@ -551,9 +550,11 @@ static int virtio_gpio_probe(struct virtio_device *vdev) return -ENOMEM;
/* Read configuration */ - virtio_cread_bytes(vdev, 0, &config, sizeof(config)); - gpio_names_size = le32_to_cpu(config.gpio_names_size); - ngpio = le16_to_cpu(config.ngpio); + gpio_names_size = + virtio_cread32(vdev, offsetof(struct virtio_gpio_config, + gpio_names_size)); + ngpio = virtio_cread16(vdev, offsetof(struct virtio_gpio_config, + ngpio)); if (!ngpio) { dev_err(dev, "Number of GPIOs can't be zero\n"); return -EINVAL; diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c index 2bba27b13947..cfa7b0a50c8e 100644 --- a/drivers/gpio/gpio-wcd934x.c +++ b/drivers/gpio/gpio-wcd934x.c @@ -46,9 +46,12 @@ static int wcd_gpio_direction_output(struct gpio_chip *chip, unsigned int pin, int val) { struct wcd_gpio_data *data = gpiochip_get_data(chip); + int ret;
- regmap_update_bits(data->map, WCD_REG_DIR_CTL_OFFSET, - WCD_PIN_MASK(pin), WCD_PIN_MASK(pin)); + ret = regmap_update_bits(data->map, WCD_REG_DIR_CTL_OFFSET, + WCD_PIN_MASK(pin), WCD_PIN_MASK(pin)); + if (ret) + return ret;
return regmap_update_bits(data->map, WCD_REG_VAL_CTL_OFFSET, WCD_PIN_MASK(pin), diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c index 720011019741..384834fbd590 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c @@ -89,8 +89,8 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, }
r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size, - AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | - AMDGPU_PTE_EXECUTABLE); + AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | + AMDGPU_VM_PAGE_EXECUTABLE);
if (r) { DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 2992ce494e00..fded8902346f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2125,13 +2125,11 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, */ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) { - timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, - DMA_RESV_USAGE_BOOKKEEP, - true, timeout); + timeout = drm_sched_entity_flush(&vm->immediate, timeout); if (timeout <= 0) return timeout;
- return dma_fence_wait_timeout(vm->last_unlocked, true, timeout); + return drm_sched_entity_flush(&vm->delayed, timeout); }
/** diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c index 12c7f4b46ea9..b0c003da52fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c @@ -36,40 +36,47 @@
static const char *mmhub_client_ids_v3_0_1[][2] = { [0][0] = "VMC", + [1][0] = "ISPXT", + [2][0] = "ISPIXT", [4][0] = "DCEDMC", [5][0] = "DCEVGA", [6][0] = "MP0", [7][0] = "MP1", - [8][0] = "MPIO", - [16][0] = "HDP", - [17][0] = "LSDMA", - [18][0] = "JPEG", - [19][0] = "VCNU0", - [21][0] = "VSCH", - [22][0] = "VCNU1", - [23][0] = "VCN1", - [32+20][0] = "VCN0", - [2][1] = "DBGUNBIO", + [8][0] = "MPM", + [12][0] = "ISPTNR", + [14][0] = "ISPCRD0", + [15][0] = "ISPCRD1", + [16][0] = "ISPCRD2", + [22][0] = "HDP", + [23][0] = "LSDMA", + [24][0] = "JPEG", + [27][0] = "VSCH", + [28][0] = "VCNU", + [29][0] = "VCN", + [1][1] = "ISPXT", + [2][1] = "ISPIXT", [3][1] = "DCEDWB", [4][1] = "DCEDMC", [5][1] = "DCEVGA", [6][1] = "MP0", [7][1] = "MP1", - [8][1] = "MPIO", - [10][1] = "DBGU0", - [11][1] = "DBGU1", - [12][1] = "DBGU2", - [13][1] = "DBGU3", - [14][1] = "XDP", - [15][1] = "OSSSYS", - [16][1] = "HDP", - [17][1] = "LSDMA", - [18][1] = "JPEG", - [19][1] = "VCNU0", - [20][1] = "VCN0", - [21][1] = "VSCH", - [22][1] = "VCNU1", - [23][1] = "VCN1", + [8][1] = "MPM", + [10][1] = "ISPMWR0", + [11][1] = "ISPMWR1", + [12][1] = "ISPTNR", + [13][1] = "ISPSWR", + [14][1] = "ISPCWR0", + [15][1] = "ISPCWR1", + [16][1] = "ISPCWR2", + [17][1] = "ISPCWR3", + [18][1] = "XDP", + [21][1] = "OSSSYS", + [22][1] = "HDP", + [23][1] = "LSDMA", + [24][1] = "JPEG", + [27][1] = "VSCH", + [28][1] = "VCNU", + [29][1] = "VCN", };
static uint32_t mmhub_v3_0_1_get_invalidate_req(unsigned int vmid, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c index aee2212e52f6..33aa23450b3f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c @@ -78,8 +78,8 @@ static int kfd_init(void) static void kfd_exit(void) { kfd_cleanup_processes(); - kfd_debugfs_fini(); kfd_process_destroy_wq(); + kfd_debugfs_fini(); kfd_procfs_shutdown(); kfd_topology_shutdown(); kfd_chardev_exit(); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index d4edddaa23dd..8421e5f0737b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4620,7 +4620,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) { - drm_atomic_private_obj_fini(&dm->atomic_obj); + if (dm->atomic_obj.state) + drm_atomic_private_obj_fini(&dm->atomic_obj); }
/****************************************************************************** @@ -6778,6 +6779,9 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn, struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn); int ret;
+ if (WARN_ON(unlikely(!old_con_state || !new_con_state))) + return -EINVAL; + trace_amdgpu_dm_connector_atomic_check(new_con_state);
if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 30d4c6fd95f5..dab732c6c6c7 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -410,6 +410,15 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, return -EINVAL; }
+ if (!state->legacy_cursor_update && amdgpu_dm_crtc_vrr_active(dm_crtc_state)) { + struct drm_plane_state *primary_state; + + /* Pull in primary plane for correct VRR handling */ + primary_state = drm_atomic_get_plane_state(state, crtc->primary); + if (IS_ERR(primary_state)) + return PTR_ERR(primary_state); + } + /* In some use cases, like reset, no stream is attached */ if (!dm_crtc_state->stream) return 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index fe96bab7d05d..67972d25366e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -124,8 +124,10 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream) psr_config.allow_multi_disp_optimizations = (amdgpu_dc_feature_mask & DC_PSR_ALLOW_MULTI_DISP_OPT);
- if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config)) - return false; + if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) { + if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config)) + return false; + }
ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c index 818a529cacc3..12a54fabd80e 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c @@ -993,7 +993,7 @@ static enum bp_result set_pixel_clock_v3( allocation.sPCLKInput.usFbDiv = cpu_to_le16((uint16_t)bp_params->feedback_divider); allocation.sPCLKInput.ucFracFbDiv = - (uint8_t)bp_params->fractional_feedback_divider; + (uint8_t)(bp_params->fractional_feedback_divider / 100000); allocation.sPCLKInput.ucPostDiv = (uint8_t)bp_params->pixel_clock_post_divider;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index dcedf9645161..719881406f18 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -162,7 +162,6 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p return NULL; } dce60_clk_mgr_construct(ctx, clk_mgr); - dce_clk_mgr_construct(ctx, clk_mgr); return &clk_mgr->base; } #endif diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c index 26feefbb8990..b268c367c27c 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c @@ -386,8 +386,6 @@ static void dce_pplib_apply_display_requirements( { struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
- pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context); - dce110_fill_display_configs(context, pp_display_cfg);
if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c index 78df96882d6e..fb2f154f4fda 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c @@ -120,9 +120,15 @@ void dce110_fill_display_configs( const struct dc_state *context, struct dm_pp_display_configuration *pp_display_cfg) { + struct dc *dc = context->clk_mgr->ctx->dc; int j; int num_cfgs = 0;
+ pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context); + pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz; + pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0; + pp_display_cfg->crtc_index = dc->res_pool->res_cap->num_timing_generator; + for (j = 0; j < context->stream_count; j++) { int k;
@@ -164,6 +170,23 @@ void dce110_fill_display_configs( cfg->v_refresh /= stream->timing.h_total; cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2) / stream->timing.v_total; + + /* Find first CRTC index and calculate its line time. + * This is necessary for DPM on SI GPUs. + */ + if (cfg->pipe_idx < pp_display_cfg->crtc_index) { + const struct dc_crtc_timing *timing = + &context->streams[0]->timing; + + pp_display_cfg->crtc_index = cfg->pipe_idx; + pp_display_cfg->line_time_in_us = + timing->h_total * 10000 / timing->pix_clk_100hz; + } + } + + if (!num_cfgs) { + pp_display_cfg->crtc_index = 0; + pp_display_cfg->line_time_in_us = 0; }
pp_display_cfg->display_count = num_cfgs; @@ -223,25 +246,8 @@ void dce11_pplib_apply_display_requirements( pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw_ctx.bw.dce.sclk_deep_sleep_khz;
- pp_display_cfg->avail_mclk_switch_time_us = - dce110_get_min_vblank_time_us(context); - /* TODO: dce11.2*/ - pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0; - - pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz; - dce110_fill_display_configs(context, pp_display_cfg);
- /* TODO: is this still applicable?*/ - if (pp_display_cfg->display_count == 1) { - const struct dc_crtc_timing *timing = - &context->streams[0]->timing; - - pp_display_cfg->crtc_index = - pp_display_cfg->disp_configs[0].pipe_idx; - pp_display_cfg->line_time_in_us = timing->h_total * 10000 / timing->pix_clk_100hz; - } - if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0) dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg); } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c index 0267644717b2..ffd0f4a76310 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c @@ -83,22 +83,13 @@ static const struct state_dependent_clocks dce60_max_clks_by_state[] = { static int dce60_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - int dprefclk_wdivider; - int dp_ref_clk_khz; - int target_div; + struct dc_context *ctx = clk_mgr_base->ctx; + int dp_ref_clk_khz = 0;
- /* DCE6 has no DPREFCLK_CNTL to read DP Reference Clock source */ - - /* Read the mmDENTIST_DISPCLK_CNTL to get the currently - * programmed DID DENTIST_DPREFCLK_WDIVIDER*/ - REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider); - - /* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/ - target_div = dentist_get_divider_from_did(dprefclk_wdivider); - - /* Calculate the current DFS clock, in kHz.*/ - dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR - * clk_mgr->base.dentist_vco_freq_khz) / target_div; + if (ASIC_REV_IS_TAHITI_P(ctx->asic_id.hw_internal_rev)) + dp_ref_clk_khz = ctx->dc_bios->fw_info.default_display_engine_pll_frequency; + else + dp_ref_clk_khz = clk_mgr_base->clks.dispclk_khz;
return dce_adjust_dp_ref_freq_for_ss(clk_mgr, dp_ref_clk_khz); } @@ -109,8 +100,6 @@ static void dce60_pplib_apply_display_requirements( { struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
- pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context); - dce110_fill_display_configs(context, pp_display_cfg);
if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0) @@ -123,11 +112,9 @@ static void dce60_update_clocks(struct clk_mgr *clk_mgr_base, { struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct dm_pp_power_level_change_request level_change_req; - int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz; - - /*TODO: W/A for dal3 linux, investigate why this works */ - if (!clk_mgr_dce->dfs_bypass_active) - patched_disp_clk = patched_disp_clk * 115 / 100; + const int max_disp_clk = + clk_mgr_dce->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz; + int patched_disp_clk = min(max_disp_clk, context->bw_ctx.bw.dce.dispclk_khz);
level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context); /* get max clock state from PPLIB */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index a825fd6c7fa6..f0b472e84a53 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -163,14 +163,13 @@ static void dcn20_setup_gsl_group_as_lock( }
/* at this point we want to program whether it's to enable or disable */ - if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL && - pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) { + if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL) { pipe_ctx->stream_res.tg->funcs->set_gsl( pipe_ctx->stream_res.tg, &gsl); - - pipe_ctx->stream_res.tg->funcs->set_gsl_source_select( - pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0); + if (pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) + pipe_ctx->stream_res.tg->funcs->set_gsl_source_select( + pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0); } else BREAK_TO_DEBUGGER(); } @@ -782,7 +781,7 @@ enum dc_status dcn20_enable_stream_timing( return DC_ERROR_UNEXPECTED; }
- hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp); + fsleep(stream->timing.v_total * (stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz));
params.vertical_total_min = stream->adjust.v_total_min; params.vertical_total_max = stream->adjust.v_total_max; diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index 9b470812d96a..2ce2d9ff7568 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -137,7 +137,8 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init) } }
- if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init) + if (((!link->wa_flags.dp_keep_receiver_powered) || hw_init) && + (link->type != dc_connection_none)) dpcd_write_rx_power_ctrl(link, false); } } diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c index 7f8f127e7722..ab6964ca1c2b 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -260,6 +260,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp) return MOD_HDCP_STATUS_FAILURE; }
+ if (!display) + return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
mutex_lock(&psp->hdcp_context.mutex); diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 2997aeed6340..632fc8aed653 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -1757,6 +1757,12 @@ static int smu_resume(void *handle)
adev->pm.dpm_enabled = true;
+ if (smu->current_power_limit) { + ret = smu_set_power_limit(smu, smu->current_power_limit); + if (ret && ret != -EOPNOTSUPP) + return ret; + } + dev_info(adev->dev, "SMU is resumed successfully!\n");
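This hunk re-applies a previously user-set power limit after resume and treats -EOPNOTSUPP as non-fatal. A minimal sketch of that "restore cached setting on resume" shape, where my_dev and my_apply_power_limit() are hypothetical stand-ins rather than the amdgpu/smu API:

/* Sketch only: my_dev and my_apply_power_limit() are hypothetical. */
#include <linux/errno.h>

struct my_dev {
    unsigned int cached_power_limit;  /* last user-set value, 0 = untouched */
};

static int my_apply_power_limit(struct my_dev *d, unsigned int limit);

static int my_resume(struct my_dev *d)
{
    int ret;

    if (d->cached_power_limit) {
        ret = my_apply_power_limit(d, d->cached_power_limit);
        /* firmware may legitimately not support limits; don't fail resume */
        if (ret && ret != -EOPNOTSUPP)
            return ret;
    }
    return 0;
}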
return 0; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 454216bd6f1d..4fabecaa2b41 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -686,7 +686,6 @@ static int vangogh_print_clk_levels(struct smu_context *smu, { DpmClocks_t *clk_table = smu->smu_table.clocks_table; SmuMetrics_t metrics; - struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; bool cur_value_match_level = false; @@ -702,31 +701,25 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
switch (clk_type) { case SMU_OD_SCLK: - if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); - size += sysfs_emit_at(buf, size, "0: %10uMhz\n", - (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); - size += sysfs_emit_at(buf, size, "1: %10uMhz\n", - (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); - } + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); + size += sysfs_emit_at(buf, size, "0: %10uMhz\n", + (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); + size += sysfs_emit_at(buf, size, "1: %10uMhz\n", + (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); break; case SMU_OD_CCLK: - if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); - size += sysfs_emit_at(buf, size, "0: %10uMhz\n", - (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); - size += sysfs_emit_at(buf, size, "1: %10uMhz\n", - (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq); - } + size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); + size += sysfs_emit_at(buf, size, "0: %10uMhz\n", + (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); + size += sysfs_emit_at(buf, size, "1: %10uMhz\n", + (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq); break; case SMU_OD_RANGE: - if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); - size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", - smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); - size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", - smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq); - } + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", + smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); + size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", + smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq); break; case SMU_SOCCLK: /* the level 3 ~ 6 of socclk use the same frequency for vangogh */ diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c index 851f0baf9460..772d8e662278 100644 --- a/drivers/gpu/drm/display/drm_dp_helper.c +++ b/drivers/gpu/drm/display/drm_dp_helper.c @@ -663,7 +663,7 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, * monitor doesn't power down exactly after the throw away read. */ if (!aux->is_remote) { - ret = drm_dp_dpcd_probe(aux, DP_DPCD_REV); + ret = drm_dp_dpcd_probe(aux, DP_LANE0_1_STATUS); if (ret < 0) return ret; } diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c index 8a98fa276e8a..96f960bcfd82 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c @@ -258,13 +258,13 @@ static int hibmc_load(struct drm_device *dev)
ret = hibmc_hw_init(priv); if (ret) - goto err; + return ret;
ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); if (ret) { drm_err(dev, "Error initializing VRAM MM; %d\n", ret); - goto err; + return ret; }
ret = hibmc_kms_init(priv); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 6d8e5e5c0cba..37c963d33bb7 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -434,7 +434,10 @@ static intel_wakeref_t __intel_runtime_pm_get_if_active(struct intel_runtime_pm * function, since the power state is undefined. This applies * atm to the late/early system suspend/resume handlers. */ - if (pm_runtime_get_if_active(rpm->kdev, ignore_usecount) <= 0) + if ((ignore_usecount && + pm_runtime_get_if_active(rpm->kdev) <= 0) || + (!ignore_usecount && + pm_runtime_get_if_in_use(rpm->kdev) <= 0)) return 0; }
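The i915 hunk above replaces the old two-argument pm_runtime_get_if_active() call with an explicit choice: pm_runtime_get_if_active() takes a reference whenever the device is runtime-active, while pm_runtime_get_if_in_use() only does so when the usage count is already non-zero. A hedged sketch of that selection, using the single-argument form shown in the hunk (this is an illustration, not the i915 wrapper itself):

/* Sketch of the selection logic between the two runtime-PM helpers. */
#include <linux/device.h>
#include <linux/pm_runtime.h>

static int get_ref_if_awake(struct device *dev, bool ignore_usecount)
{
    /* Both helpers return 1 and take a reference on success,
     * 0 or a negative errno otherwise.
     */
    if (ignore_usecount)
        return pm_runtime_get_if_active(dev);

    return pm_runtime_get_if_in_use(dev);
}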
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 1113e6b2ec8e..aaf7c338eb96 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -928,7 +928,8 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m, uint64_t off = drm_vma_node_start(&obj->vma_node); const char *madv;
- msm_gem_lock(obj); + if (!msm_gem_trylock(obj)) + return;
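msm_gem_describe() is a debugfs dump path; taking the object lock with a trylock and skipping contended objects means the dump can never block against (or deadlock with) a path that already holds the lock. A purely illustrative userspace analogue of the same "skip busy objects in a stats dump" pattern with pthreads:

/* Illustrative userspace analogue only; not the msm/dma_resv code. */
#include <pthread.h>
#include <stdio.h>

struct obj {
    pthread_mutex_t lock;
    size_t size;
};

static void describe(struct obj *o)
{
    if (pthread_mutex_trylock(&o->lock) != 0)
        return;                 /* contended: skip rather than block */

    printf("obj size: %zu\n", o->size);
    pthread_mutex_unlock(&o->lock);
}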
stats->all.count++; stats->all.size += obj->size; diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 8ddef5443140..631a9aa129bd 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -183,6 +183,12 @@ msm_gem_lock(struct drm_gem_object *obj) dma_resv_lock(obj->resv, NULL); }
+static inline bool __must_check +msm_gem_trylock(struct drm_gem_object *obj) +{ + return dma_resv_trylock(obj->resv); +} + static inline int msm_gem_lock_interruptible(struct drm_gem_object *obj) { diff --git a/drivers/gpu/drm/nouveau/nvif/vmm.c b/drivers/gpu/drm/nouveau/nvif/vmm.c index 99296f03371a..07c1ebc2a941 100644 --- a/drivers/gpu/drm/nouveau/nvif/vmm.c +++ b/drivers/gpu/drm/nouveau/nvif/vmm.c @@ -219,7 +219,8 @@ nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass, case RAW: args->type = NVIF_VMM_V0_TYPE_RAW; break; default: WARN_ON(1); - return -EINVAL; + ret = -EINVAL; + goto done; }
memcpy(args->data, argv, argc); diff --git a/drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi.c index 10febea473cd..6cec796dd463 100644 --- a/drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi.c +++ b/drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi.c @@ -585,6 +585,9 @@ rzg2l_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge, if (mode->clock > 148500) return MODE_CLOCK_HIGH;
+ if (mode->clock < 5803) + return MODE_CLOCK_LOW; + return MODE_OK; }
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c index 37c08fac7e7d..80ba34cabca3 100644 --- a/drivers/gpu/drm/ttm/ttm_pool.c +++ b/drivers/gpu/drm/ttm/ttm_pool.c @@ -615,7 +615,6 @@ void ttm_pool_fini(struct ttm_pool *pool) } EXPORT_SYMBOL(ttm_pool_fini);
-/* As long as pages are available make sure to release at least one */ static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc) { @@ -623,9 +622,12 @@ static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
do num_freed += ttm_pool_shrink(); - while (!num_freed && atomic_long_read(&allocated_pages)); + while (num_freed < sc->nr_to_scan && + atomic_long_read(&allocated_pages));
- return num_freed; + sc->nr_scanned = num_freed; + + return num_freed ?: SHRINK_STOP; }
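The ttm_pool change makes the scan callback follow the shrinker contract more closely: reclaim at most sc->nr_to_scan units, report the work actually done through sc->nr_scanned, and return SHRINK_STOP when nothing could be freed so the core stops retrying. A minimal sketch of that contract, where pool_pages_available() and pool_free_one_page() are hypothetical helpers:

/* Sketch of a shrinker scan callback honouring the contract. */
#include <linux/shrinker.h>
#include <linux/types.h>

static bool pool_pages_available(void);
static unsigned long pool_free_one_page(void);

static unsigned long my_pool_scan(struct shrinker *shrink,
                                  struct shrink_control *sc)
{
    unsigned long freed = 0;

    while (freed < sc->nr_to_scan && pool_pages_available())
        freed += pool_free_one_page();

    sc->nr_scanned = freed;     /* tell the core how much work was done */

    return freed ? freed : SHRINK_STOP;
}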
/* Return the number of pages available or SHRINK_EMPTY if we have none */ diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c index 46ff9c75bb12..8f2423a15c71 100644 --- a/drivers/gpu/drm/ttm/ttm_resource.c +++ b/drivers/gpu/drm/ttm/ttm_resource.c @@ -437,6 +437,9 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev, } spin_unlock(&bdev->lru_lock);
+ if (ret && ret != -ENOENT) + return ret; + spin_lock(&man->move_lock); fence = dma_fence_get(man->move); spin_unlock(&man->move_lock); diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index 0b561c1eb59e..7cf17c671da4 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -858,10 +858,12 @@ static int apple_probe(struct hid_device *hdev, return ret; }
- timer_setup(&asc->battery_timer, apple_battery_timer_tick, 0); - mod_timer(&asc->battery_timer, - jiffies + msecs_to_jiffies(APPLE_BATTERY_TIMEOUT_MS)); - apple_fetch_battery(hdev); + if (quirks & APPLE_RDESC_BATTERY) { + timer_setup(&asc->battery_timer, apple_battery_timer_tick, 0); + mod_timer(&asc->battery_timer, + jiffies + msecs_to_jiffies(APPLE_BATTERY_TIMEOUT_MS)); + apple_fetch_battery(hdev); + }
if (quirks & APPLE_BACKLIGHT_CTL) apple_backlight_init(hdev); @@ -873,7 +875,8 @@ static void apple_remove(struct hid_device *hdev) { struct apple_sc *asc = hid_get_drvdata(hdev);
- del_timer_sync(&asc->battery_timer); + if (asc->quirks & APPLE_RDESC_BATTERY) + del_timer_sync(&asc->battery_timer);
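The hid-apple hunks arm the battery poll timer only for devices with the APPLE_RDESC_BATTERY quirk, so the remove path must use the same condition: del_timer_sync() should never run on a timer that was never initialized. A condensed sketch of pairing setup and teardown on the same gate, with hypothetical names (my_dev, MY_QUIRK_BATTERY, my_timer_fn):

/* Sketch: timer setup and teardown gated on the same condition. */
#include <linux/bits.h>
#include <linux/jiffies.h>
#include <linux/timer.h>

#define MY_QUIRK_BATTERY    BIT(0)

struct my_dev {
    unsigned long quirks;
    struct timer_list poll_timer;
};

static void my_timer_fn(struct timer_list *t);

static void my_probe_timer(struct my_dev *d)
{
    if (d->quirks & MY_QUIRK_BATTERY) {
        timer_setup(&d->poll_timer, my_timer_fn, 0);
        mod_timer(&d->poll_timer, jiffies + msecs_to_jiffies(60000));
    }
}

static void my_remove_timer(struct my_dev *d)
{
    if (d->quirks & MY_QUIRK_BATTERY)   /* same gate as setup */
        del_timer_sync(&d->poll_timer);
}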
hid_hw_stop(hdev); } diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index 9bb8daf7f786..4fe1e0bc2449 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -772,16 +772,30 @@ static void magicmouse_enable_mt_work(struct work_struct *work) hid_err(msc->hdev, "unable to request touch data (%d)\n", ret); }
+static bool is_usb_magicmouse2(__u32 vendor, __u32 product) +{ + if (vendor != USB_VENDOR_ID_APPLE) + return false; + return product == USB_DEVICE_ID_APPLE_MAGICMOUSE2; +} + +static bool is_usb_magictrackpad2(__u32 vendor, __u32 product) +{ + if (vendor != USB_VENDOR_ID_APPLE) + return false; + return product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 || + product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC; +} + static int magicmouse_fetch_battery(struct hid_device *hdev) { #ifdef CONFIG_HID_BATTERY_STRENGTH struct hid_report_enum *report_enum; struct hid_report *report;
- if (!hdev->battery || hdev->vendor != USB_VENDOR_ID_APPLE || - (hdev->product != USB_DEVICE_ID_APPLE_MAGICMOUSE2 && - hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 && - hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC)) + if (!hdev->battery || + (!is_usb_magicmouse2(hdev->vendor, hdev->product) && + !is_usb_magictrackpad2(hdev->vendor, hdev->product))) return -1;
report_enum = &hdev->report_enum[hdev->battery_report_type]; @@ -843,16 +857,17 @@ static int magicmouse_probe(struct hid_device *hdev, return ret; }
- timer_setup(&msc->battery_timer, magicmouse_battery_timer_tick, 0); - mod_timer(&msc->battery_timer, - jiffies + msecs_to_jiffies(USB_BATTERY_TIMEOUT_MS)); - magicmouse_fetch_battery(hdev); + if (is_usb_magicmouse2(id->vendor, id->product) || + is_usb_magictrackpad2(id->vendor, id->product)) { + timer_setup(&msc->battery_timer, magicmouse_battery_timer_tick, 0); + mod_timer(&msc->battery_timer, + jiffies + msecs_to_jiffies(USB_BATTERY_TIMEOUT_MS)); + magicmouse_fetch_battery(hdev); + }
- if (id->vendor == USB_VENDOR_ID_APPLE && - (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 || - ((id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 || - id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) && - hdev->type != HID_TYPE_USBMOUSE))) + if (is_usb_magicmouse2(id->vendor, id->product) || + (is_usb_magictrackpad2(id->vendor, id->product) && + hdev->type != HID_TYPE_USBMOUSE)) return 0;
if (!msc->input) { @@ -908,7 +923,10 @@ static int magicmouse_probe(struct hid_device *hdev,
return 0; err_stop_hw: - del_timer_sync(&msc->battery_timer); + if (is_usb_magicmouse2(id->vendor, id->product) || + is_usb_magictrackpad2(id->vendor, id->product)) + del_timer_sync(&msc->battery_timer); + hid_hw_stop(hdev); return ret; } @@ -919,7 +937,9 @@ static void magicmouse_remove(struct hid_device *hdev)
if (msc) { cancel_delayed_work_sync(&msc->work); - del_timer_sync(&msc->battery_timer); + if (is_usb_magicmouse2(hdev->vendor, hdev->product) || + is_usb_magictrackpad2(hdev->vendor, hdev->product)) + del_timer_sync(&msc->battery_timer); }
hid_hw_stop(hdev); @@ -936,10 +956,8 @@ static __u8 *magicmouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, * 0x05, 0x01, // Usage Page (Generic Desktop) 0 * 0x09, 0x02, // Usage (Mouse) 2 */ - if (hdev->vendor == USB_VENDOR_ID_APPLE && - (hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 || - hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 || - hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) && + if ((is_usb_magicmouse2(hdev->vendor, hdev->product) || + is_usb_magictrackpad2(hdev->vendor, hdev->product)) && *rsize == 83 && rdesc[46] == 0x84 && rdesc[58] == 0x85) { hid_info(hdev, "fixing up magicmouse battery report descriptor\n"); diff --git a/drivers/hwmon/emc2305.c b/drivers/hwmon/emc2305.c index 29f0e4945f19..840acd5260f4 100644 --- a/drivers/hwmon/emc2305.c +++ b/drivers/hwmon/emc2305.c @@ -303,6 +303,12 @@ static int emc2305_set_single_tz(struct device *dev, int idx) dev_err(dev, "Failed to register cooling device %s\n", emc2305_fan_name[idx]); return PTR_ERR(data->cdev_data[cdev_idx].cdev); } + + if (data->cdev_data[cdev_idx].cur_state > 0) + /* Update pwm when temperature is above trips */ + pwm = EMC2305_PWM_STATE2DUTY(data->cdev_data[cdev_idx].cur_state, + data->max_state, EMC2305_FAN_MAX); + /* Set minimal PWM speed. */ if (data->pwm_separate) { ret = emc2305_set_pwm(dev, pwm, cdev_idx); @@ -316,10 +322,10 @@ static int emc2305_set_single_tz(struct device *dev, int idx) } } data->cdev_data[cdev_idx].cur_state = - EMC2305_PWM_DUTY2STATE(data->pwm_min[cdev_idx], data->max_state, + EMC2305_PWM_DUTY2STATE(pwm, data->max_state, EMC2305_FAN_MAX); data->cdev_data[cdev_idx].last_hwmon_state = - EMC2305_PWM_DUTY2STATE(data->pwm_min[cdev_idx], data->max_state, + EMC2305_PWM_DUTY2STATE(pwm, data->max_state, EMC2305_FAN_MAX); return 0; } diff --git a/drivers/hwmon/gsc-hwmon.c b/drivers/hwmon/gsc-hwmon.c index 1501ceb551e7..23238a80c000 100644 --- a/drivers/hwmon/gsc-hwmon.c +++ b/drivers/hwmon/gsc-hwmon.c @@ -65,7 +65,7 @@ static ssize_t pwm_auto_point_temp_show(struct device *dev, return ret;
ret = regs[0] | regs[1] << 8; - return sprintf(buf, "%d\n", ret * 10); + return sprintf(buf, "%d\n", ret * 100); }
static ssize_t pwm_auto_point_temp_store(struct device *dev, @@ -100,7 +100,7 @@ static ssize_t pwm_auto_point_pwm_show(struct device *dev, { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- return sprintf(buf, "%d\n", 255 * (50 + (attr->index * 10))); + return sprintf(buf, "%d\n", 255 * (50 + (attr->index * 10)) / 100); }
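Both gsc-hwmon fixes are unit conversions for the standard hwmon sysfs ABI: temperatures are reported in millidegrees Celsius and pwm values on a 0-255 scale. Assuming the register pair holds tenths of a degree (as the *100 factor implies) and the auto points correspond to 50%, 60%, ... duty, the conversions work out as below; the numbers are illustrative only.

/* Quick check of the two conversions (illustrative values). */
#include <stdio.h>

int main(void)
{
    int reg = 425;                  /* assumed: tenths of a degree Celsius */
    int temp_millideg = reg * 100;  /* hwmon temp*_input unit: mdegC */

    int point = 2;                  /* third auto point: 50% + 2*10% = 70% duty */
    int pwm = 255 * (50 + point * 10) / 100;    /* hwmon pwm range 0..255 */

    printf("%d mdegC, pwm=%d\n", temp_millideg, pwm);  /* 42500 mdegC, pwm=178 */
    return 0;
}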
static SENSOR_DEVICE_ATTR_RO(pwm1_auto_point1_pwm, pwm_auto_point_pwm, 0); diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c index d2499f302b50..f43067f6797e 100644 --- a/drivers/i2c/i2c-core-acpi.c +++ b/drivers/i2c/i2c-core-acpi.c @@ -370,6 +370,7 @@ static const struct acpi_device_id i2c_acpi_force_100khz_device_ids[] = { * the device works without issues on Windows at what is expected to be * a 400KHz frequency. The root cause of the issue is not known. */ + { "DLL0945", 0 }, { "ELAN06FA", 0 }, {} }; diff --git a/drivers/i3c/internals.h b/drivers/i3c/internals.h index 908a807badaf..e267ea5ec5b9 100644 --- a/drivers/i3c/internals.h +++ b/drivers/i3c/internals.h @@ -9,6 +9,7 @@ #define I3C_INTERNALS_H
#include <linux/i3c/master.h> +#include <linux/io.h>
extern struct bus_type i3c_bus_type;
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c index 33254bc338b9..b6995e767850 100644 --- a/drivers/i3c/master.c +++ b/drivers/i3c/master.c @@ -1398,7 +1398,7 @@ static int i3c_master_retrieve_dev_info(struct i3c_dev_desc *dev)
if (dev->info.bcr & I3C_BCR_HDR_CAP) { ret = i3c_master_gethdrcap_locked(master, &dev->info); - if (ret) + if (ret && ret != -ENOTSUPP) return ret; }
@@ -2430,6 +2430,8 @@ static int i3c_i2c_notifier_call(struct notifier_block *nb, unsigned long action case BUS_NOTIFY_DEL_DEVICE: ret = i3c_master_i2c_detach(adap, client); break; + default: + ret = -EINVAL; } i3c_bus_maintenance_unlock(&master->bus);
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 44842f243f40..6908052dea77 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -1432,7 +1432,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { };
static const struct x86_cpu_id intel_mwait_ids[] __initconst = { - X86_MATCH_VENDOR_FAM_FEATURE(INTEL, 6, X86_FEATURE_MWAIT, NULL), + X86_MATCH_VENDOR_FAM_FEATURE(INTEL, X86_FAMILY_ANY, X86_FEATURE_MWAIT, NULL), {} };
diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c index 967f06cd3f94..e147eaf1a3b1 100644 --- a/drivers/iio/adc/ad7768-1.c +++ b/drivers/iio/adc/ad7768-1.c @@ -203,6 +203,24 @@ static int ad7768_spi_reg_write(struct ad7768_state *st, return spi_write(st->spi, st->data.d8, 2); }
+static int ad7768_send_sync_pulse(struct ad7768_state *st) +{ + /* + * The datasheet specifies a minimum SYNC_IN pulse width of 1.5 × Tmclk, + * where Tmclk is the MCLK period. The supported MCLK frequencies range + * from 0.6 MHz to 17 MHz, which corresponds to a minimum SYNC_IN pulse + * width of approximately 2.5 µs in the worst-case scenario (0.6 MHz). + * + * Add a delay to ensure the pulse width is always sufficient to + * trigger synchronization. + */ + gpiod_set_value_cansleep(st->gpio_sync_in, 1); + fsleep(3); + gpiod_set_value_cansleep(st->gpio_sync_in, 0); + + return 0; +} + static int ad7768_set_mode(struct ad7768_state *st, enum ad7768_conv_mode mode) { @@ -288,10 +306,7 @@ static int ad7768_set_dig_fil(struct ad7768_state *st, return ret;
/* A sync-in pulse is required every time the filter dec rate changes */ - gpiod_set_value(st->gpio_sync_in, 1); - gpiod_set_value(st->gpio_sync_in, 0); - - return 0; + return ad7768_send_sync_pulse(st); }
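The ad7768-1 comment above spells out the arithmetic behind the new delay: the minimum SYNC_IN pulse width is 1.5 MCLK periods, and at the slowest supported MCLK (0.6 MHz) that is 1.5 / 0.6 MHz = 2.5 us, hence the fsleep(3) (3 us) between the two GPIO edges. A quick standalone check:

/* Worst-case SYNC_IN pulse width: 1.5 MCLK periods at the slowest MCLK. */
#include <stdio.h>

int main(void)
{
    double mclk_min_hz = 600000.0;          /* 0.6 MHz, slowest supported MCLK */
    double min_pulse_us = 1.5 / mclk_min_hz * 1e6;

    printf("min pulse: %.1f us\n", min_pulse_us);   /* 2.5 us -> sleep 3 us */
    return 0;
}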
static int ad7768_set_freq(struct ad7768_state *st, diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c index 533667eefe41..914274ed899e 100644 --- a/drivers/iio/adc/ad_sigma_delta.c +++ b/drivers/iio/adc/ad_sigma_delta.c @@ -378,7 +378,7 @@ static int ad_sd_buffer_postenable(struct iio_dev *indio_dev) return ret; }
- samples_buf_size = ALIGN(slot * indio_dev->channels[0].scan_type.storagebits, 8); + samples_buf_size = ALIGN(slot * indio_dev->channels[0].scan_type.storagebits / 8, 8); samples_buf_size += sizeof(int64_t); samples_buf = devm_krealloc(&sigma_delta->spi->dev, sigma_delta->samples_buf, samples_buf_size, GFP_KERNEL); @@ -406,7 +406,7 @@ static int ad_sd_buffer_postenable(struct iio_dev *indio_dev) return ret; }
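The ad_sigma_delta fix converts storage bits to bytes before sizing the buffer: the per-scan payload is slot * storagebits / 8 bytes, rounded up to an 8-byte boundary so the appended int64_t timestamp stays naturally aligned. A worked example with an illustrative channel layout (3 enabled slots of 32 storage bits):

/* Illustrative sizing: 3 enabled slots of 32 storage bits each. */
#include <stdio.h>
#include <stdint.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    unsigned int slot = 3, storagebits = 32;
    size_t size = ALIGN_UP(slot * storagebits / 8, 8);  /* 12 -> 16 bytes */

    size += sizeof(int64_t);                            /* + timestamp */
    printf("samples_buf_size = %zu\n", size);           /* 24 */
    return 0;
}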
-static int ad_sd_buffer_postdisable(struct iio_dev *indio_dev) +static int ad_sd_buffer_predisable(struct iio_dev *indio_dev) { struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
@@ -534,7 +534,7 @@ static bool ad_sd_validate_scan_mask(struct iio_dev *indio_dev, const unsigned l
static const struct iio_buffer_setup_ops ad_sd_buffer_setup_ops = { .postenable = &ad_sd_buffer_postenable, - .postdisable = &ad_sd_buffer_postdisable, + .predisable = &ad_sd_buffer_predisable, .validate_scan_mask = &ad_sd_validate_scan_mask, };
diff --git a/drivers/iio/imu/bno055/bno055.c b/drivers/iio/imu/bno055/bno055.c index 52744dd98e65..98f17c29da69 100644 --- a/drivers/iio/imu/bno055/bno055.c +++ b/drivers/iio/imu/bno055/bno055.c @@ -118,6 +118,7 @@ struct bno055_sysfs_attr { int len; int *fusion_vals; int *hw_xlate; + int hw_xlate_len; int type; };
@@ -170,20 +171,24 @@ static int bno055_gyr_scale_vals[] = { 1000, 1877467, 2000, 1877467, };
+static int bno055_gyr_scale_hw_xlate[] = {0, 1, 2, 3, 4}; static struct bno055_sysfs_attr bno055_gyr_scale = { .vals = bno055_gyr_scale_vals, .len = ARRAY_SIZE(bno055_gyr_scale_vals), .fusion_vals = (int[]){1, 900}, - .hw_xlate = (int[]){4, 3, 2, 1, 0}, + .hw_xlate = bno055_gyr_scale_hw_xlate, + .hw_xlate_len = ARRAY_SIZE(bno055_gyr_scale_hw_xlate), .type = IIO_VAL_FRACTIONAL, };
static int bno055_gyr_lpf_vals[] = {12, 23, 32, 47, 64, 116, 230, 523}; +static int bno055_gyr_lpf_hw_xlate[] = {5, 4, 7, 3, 6, 2, 1, 0}; static struct bno055_sysfs_attr bno055_gyr_lpf = { .vals = bno055_gyr_lpf_vals, .len = ARRAY_SIZE(bno055_gyr_lpf_vals), .fusion_vals = (int[]){32}, - .hw_xlate = (int[]){5, 4, 7, 3, 6, 2, 1, 0}, + .hw_xlate = bno055_gyr_lpf_hw_xlate, + .hw_xlate_len = ARRAY_SIZE(bno055_gyr_lpf_hw_xlate), .type = IIO_VAL_INT, };
@@ -561,7 +566,7 @@ static int bno055_get_regmask(struct bno055_priv *priv, int *val, int *val2,
idx = (hwval & mask) >> shift; if (attr->hw_xlate) - for (i = 0; i < attr->len; i++) + for (i = 0; i < attr->hw_xlate_len; i++) if (attr->hw_xlate[i] == idx) { idx = i; break; diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600.h b/drivers/iio/imu/inv_icm42600/inv_icm42600.h index 94c0eb0bf874..809734e566e3 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600.h +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600.h @@ -142,11 +142,11 @@ struct inv_icm42600_state { struct inv_icm42600_suspended suspended; struct iio_dev *indio_gyro; struct iio_dev *indio_accel; - uint8_t buffer[2] __aligned(IIO_DMA_MINALIGN); + u8 buffer[2] __aligned(IIO_DMA_MINALIGN); struct inv_icm42600_fifo fifo; struct { - int64_t gyro; - int64_t accel; + s64 gyro; + s64 accel; } timestamp; };
@@ -369,7 +369,7 @@ const struct iio_mount_matrix * inv_icm42600_get_mount_matrix(const struct iio_dev *indio_dev, const struct iio_chan_spec *chan);
-uint32_t inv_icm42600_odr_to_period(enum inv_icm42600_odr odr); +u32 inv_icm42600_odr_to_period(enum inv_icm42600_odr odr);
int inv_icm42600_set_accel_conf(struct inv_icm42600_state *st, struct inv_icm42600_sensor_conf *conf, diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c index 47720560de6e..a4155939e956 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c @@ -77,8 +77,8 @@ static const struct iio_chan_spec inv_icm42600_accel_channels[] = { */ struct inv_icm42600_accel_buffer { struct inv_icm42600_fifo_sensor_data accel; - int16_t temp; - int64_t timestamp __aligned(8); + s16 temp; + aligned_s64 timestamp; };
#define INV_ICM42600_SCAN_MASK_ACCEL_3AXIS \ @@ -142,7 +142,7 @@ static int inv_icm42600_accel_update_scan_mode(struct iio_dev *indio_dev,
static int inv_icm42600_accel_read_sensor(struct inv_icm42600_state *st, struct iio_chan_spec const *chan, - int16_t *val) + s16 *val) { struct device *dev = regmap_get_device(st->map); struct inv_icm42600_sensor_conf conf = INV_ICM42600_SENSOR_CONF_INIT; @@ -182,7 +182,7 @@ static int inv_icm42600_accel_read_sensor(struct inv_icm42600_state *st, if (ret) goto exit;
- *val = (int16_t)be16_to_cpup(data); + *val = (s16)be16_to_cpup(data); if (*val == INV_ICM42600_DATA_INVALID) ret = -EINVAL; exit: @@ -359,11 +359,11 @@ static int inv_icm42600_accel_read_offset(struct inv_icm42600_state *st, int *val, int *val2) { struct device *dev = regmap_get_device(st->map); - int64_t val64; - int32_t bias; + s64 val64; + s32 bias; unsigned int reg; - int16_t offset; - uint8_t data[2]; + s16 offset; + u8 data[2]; int ret;
if (chan->type != IIO_ACCEL) @@ -417,7 +417,7 @@ static int inv_icm42600_accel_read_offset(struct inv_icm42600_state *st, * result in micro (1000000) * (offset * 5 * 9.806650 * 1000000) / 10000 */ - val64 = (int64_t)offset * 5LL * 9806650LL; + val64 = (s64)offset * 5LL * 9806650LL; /* for rounding, add + or - divisor (10000) divided by 2 */ if (val64 >= 0) val64 += 10000LL / 2LL; @@ -435,10 +435,10 @@ static int inv_icm42600_accel_write_offset(struct inv_icm42600_state *st, int val, int val2) { struct device *dev = regmap_get_device(st->map); - int64_t val64; - int32_t min, max; + s64 val64; + s32 min, max; unsigned int reg, regval; - int16_t offset; + s16 offset; int ret;
if (chan->type != IIO_ACCEL) @@ -463,7 +463,7 @@ static int inv_icm42600_accel_write_offset(struct inv_icm42600_state *st, inv_icm42600_accel_calibbias[1]; max = inv_icm42600_accel_calibbias[4] * 1000000L + inv_icm42600_accel_calibbias[5]; - val64 = (int64_t)val * 1000000LL + (int64_t)val2; + val64 = (s64)val * 1000000LL + (s64)val2; if (val64 < min || val64 > max) return -EINVAL;
@@ -538,7 +538,7 @@ static int inv_icm42600_accel_read_raw(struct iio_dev *indio_dev, int *val, int *val2, long mask) { struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev); - int16_t data; + s16 data; int ret;
switch (chan->type) { @@ -755,7 +755,8 @@ int inv_icm42600_accel_parse_fifo(struct iio_dev *indio_dev) const int8_t *temp; unsigned int odr; int64_t ts_val; - struct inv_icm42600_accel_buffer buffer; + /* buffer is copied to userspace, zeroing it to avoid any data leak */ + struct inv_icm42600_accel_buffer buffer = { };
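Both inv_icm42600 FIFO parsers (and the as73211 and isl29501 changes later in the series) switch from an uninitialized stack buffer plus per-iteration memset to zero-initializing the scan structure at its declaration, so any field the parser never writes is still zero before the data is pushed toward userspace. A minimal sketch of that shape; the struct layout here is made up, and the empty-brace initializer is the kernel idiom (accepted by GCC/Clang):

/* Sketch: zero the scan struct at declaration before partially filling it. */
#include <stdint.h>
#include <string.h>

struct scan {
    int16_t xyz[3];
    int16_t temp;
    int64_t timestamp;
};

static void fill_scan(struct scan *out, const int16_t *xyz, int64_t ts)
{
    struct scan s = { };    /* every member not written below stays zero */

    memcpy(s.xyz, xyz, sizeof(s.xyz));
    s.timestamp = ts;
    /* s.temp was never written: it is 0 rather than stale stack data */
    *out = s;
}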
/* parse all fifo packets */ for (i = 0, no = 0; i < st->fifo.count; i += size, ++no) { @@ -774,8 +775,6 @@ int inv_icm42600_accel_parse_fifo(struct iio_dev *indio_dev) inv_sensors_timestamp_apply_odr(ts, st->fifo.period, st->fifo.nb.total, no);
- /* buffer is copied to userspace, zeroing it to avoid any data leak */ - memset(&buffer, 0, sizeof(buffer)); memcpy(&buffer.accel, accel, sizeof(buffer.accel)); /* convert 8 bits FIFO temperature in high resolution format */ buffer.temp = temp ? (*temp * 64) : 0; diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c index 6ef1df9d60b7..aca6ce758890 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c @@ -26,28 +26,28 @@ #define INV_ICM42600_FIFO_HEADER_ODR_GYRO BIT(0)
struct inv_icm42600_fifo_1sensor_packet { - uint8_t header; + u8 header; struct inv_icm42600_fifo_sensor_data data; - int8_t temp; + s8 temp; } __packed; #define INV_ICM42600_FIFO_1SENSOR_PACKET_SIZE 8
struct inv_icm42600_fifo_2sensors_packet { - uint8_t header; + u8 header; struct inv_icm42600_fifo_sensor_data accel; struct inv_icm42600_fifo_sensor_data gyro; - int8_t temp; + s8 temp; __be16 timestamp; } __packed; #define INV_ICM42600_FIFO_2SENSORS_PACKET_SIZE 16
ssize_t inv_icm42600_fifo_decode_packet(const void *packet, const void **accel, - const void **gyro, const int8_t **temp, + const void **gyro, const s8 **temp, const void **timestamp, unsigned int *odr) { const struct inv_icm42600_fifo_1sensor_packet *pack1 = packet; const struct inv_icm42600_fifo_2sensors_packet *pack2 = packet; - uint8_t header = *((const uint8_t *)packet); + u8 header = *((const u8 *)packet);
/* FIFO empty */ if (header & INV_ICM42600_FIFO_HEADER_MSG) { @@ -100,7 +100,7 @@ ssize_t inv_icm42600_fifo_decode_packet(const void *packet, const void **accel,
void inv_icm42600_buffer_update_fifo_period(struct inv_icm42600_state *st) { - uint32_t period_gyro, period_accel, period; + u32 period_gyro, period_accel, period;
if (st->fifo.en & INV_ICM42600_SENSOR_GYRO) period_gyro = inv_icm42600_odr_to_period(st->conf.gyro.odr); @@ -204,8 +204,8 @@ int inv_icm42600_buffer_update_watermark(struct inv_icm42600_state *st) { size_t packet_size, wm_size; unsigned int wm_gyro, wm_accel, watermark; - uint32_t period_gyro, period_accel, period; - uint32_t latency_gyro, latency_accel, latency; + u32 period_gyro, period_accel, period; + u32 latency_gyro, latency_accel, latency; bool restore; __le16 raw_wm; int ret; @@ -451,7 +451,7 @@ int inv_icm42600_buffer_fifo_read(struct inv_icm42600_state *st, __be16 *raw_fifo_count; ssize_t i, size; const void *accel, *gyro, *timestamp; - const int8_t *temp; + const s8 *temp; unsigned int odr; int ret;
@@ -538,7 +538,7 @@ int inv_icm42600_buffer_hwfifo_flush(struct inv_icm42600_state *st, unsigned int count) { struct inv_sensors_timestamp *ts; - int64_t gyro_ts, accel_ts; + s64 gyro_ts, accel_ts; int ret;
gyro_ts = iio_get_time_ns(st->indio_gyro); diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h index 8b85ee333bf8..eed6a3152acf 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h @@ -28,7 +28,7 @@ struct inv_icm42600_state; struct inv_icm42600_fifo { unsigned int on; unsigned int en; - uint32_t period; + u32 period; struct { unsigned int gyro; unsigned int accel; @@ -39,7 +39,7 @@ struct inv_icm42600_fifo { size_t accel; size_t total; } nb; - uint8_t data[2080] __aligned(IIO_DMA_MINALIGN); + u8 data[2080] __aligned(IIO_DMA_MINALIGN); };
/* FIFO data packet */ @@ -50,7 +50,7 @@ struct inv_icm42600_fifo_sensor_data { } __packed; #define INV_ICM42600_FIFO_DATA_INVALID -32768
-static inline int16_t inv_icm42600_fifo_get_sensor_data(__be16 d) +static inline s16 inv_icm42600_fifo_get_sensor_data(__be16 d) { return be16_to_cpu(d); } @@ -58,7 +58,7 @@ static inline int16_t inv_icm42600_fifo_get_sensor_data(__be16 d) static inline bool inv_icm42600_fifo_is_data_valid(const struct inv_icm42600_fifo_sensor_data *s) { - int16_t x, y, z; + s16 x, y, z;
x = inv_icm42600_fifo_get_sensor_data(s->x); y = inv_icm42600_fifo_get_sensor_data(s->y); @@ -73,7 +73,7 @@ inv_icm42600_fifo_is_data_valid(const struct inv_icm42600_fifo_sensor_data *s) }
ssize_t inv_icm42600_fifo_decode_packet(const void *packet, const void **accel, - const void **gyro, const int8_t **temp, + const void **gyro, const s8 **temp, const void **timestamp, unsigned int *odr);
extern const struct iio_buffer_setup_ops inv_icm42600_buffer_ops; diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c index da65aa4e2724..91c181bb9286 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c @@ -56,7 +56,7 @@ const struct regmap_config inv_icm42600_spi_regmap_config = { EXPORT_SYMBOL_NS_GPL(inv_icm42600_spi_regmap_config, IIO_ICM42600);
struct inv_icm42600_hw { - uint8_t whoami; + u8 whoami; const char *name; const struct inv_icm42600_conf *conf; }; @@ -115,9 +115,9 @@ inv_icm42600_get_mount_matrix(const struct iio_dev *indio_dev, return &st->orientation; }
-uint32_t inv_icm42600_odr_to_period(enum inv_icm42600_odr odr) +u32 inv_icm42600_odr_to_period(enum inv_icm42600_odr odr) { - static uint32_t odr_periods[INV_ICM42600_ODR_NB] = { + static u32 odr_periods[INV_ICM42600_ODR_NB] = { /* reserved values */ 0, 0, 0, /* 8kHz */ diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c index d08cd6839a3a..9ee26478b666 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c @@ -77,8 +77,8 @@ static const struct iio_chan_spec inv_icm42600_gyro_channels[] = { */ struct inv_icm42600_gyro_buffer { struct inv_icm42600_fifo_sensor_data gyro; - int16_t temp; - int64_t timestamp __aligned(8); + s16 temp; + aligned_s64 timestamp; };
#define INV_ICM42600_SCAN_MASK_GYRO_3AXIS \ @@ -142,7 +142,7 @@ static int inv_icm42600_gyro_update_scan_mode(struct iio_dev *indio_dev,
static int inv_icm42600_gyro_read_sensor(struct inv_icm42600_state *st, struct iio_chan_spec const *chan, - int16_t *val) + s16 *val) { struct device *dev = regmap_get_device(st->map); struct inv_icm42600_sensor_conf conf = INV_ICM42600_SENSOR_CONF_INIT; @@ -182,7 +182,7 @@ static int inv_icm42600_gyro_read_sensor(struct inv_icm42600_state *st, if (ret) goto exit;
- *val = (int16_t)be16_to_cpup(data); + *val = (s16)be16_to_cpup(data); if (*val == INV_ICM42600_DATA_INVALID) ret = -EINVAL; exit: @@ -371,11 +371,11 @@ static int inv_icm42600_gyro_read_offset(struct inv_icm42600_state *st, int *val, int *val2) { struct device *dev = regmap_get_device(st->map); - int64_t val64; - int32_t bias; + s64 val64; + s32 bias; unsigned int reg; - int16_t offset; - uint8_t data[2]; + s16 offset; + u8 data[2]; int ret;
if (chan->type != IIO_ANGL_VEL) @@ -429,7 +429,7 @@ static int inv_icm42600_gyro_read_offset(struct inv_icm42600_state *st, * result in nano (1000000000) * (offset * 64 * Pi * 1000000000) / (2048 * 180) */ - val64 = (int64_t)offset * 64LL * 3141592653LL; + val64 = (s64)offset * 64LL * 3141592653LL; /* for rounding, add + or - divisor (2048 * 180) divided by 2 */ if (val64 >= 0) val64 += 2048 * 180 / 2; @@ -447,9 +447,9 @@ static int inv_icm42600_gyro_write_offset(struct inv_icm42600_state *st, int val, int val2) { struct device *dev = regmap_get_device(st->map); - int64_t val64, min, max; + s64 val64, min, max; unsigned int reg, regval; - int16_t offset; + s16 offset; int ret;
if (chan->type != IIO_ANGL_VEL) @@ -470,11 +470,11 @@ static int inv_icm42600_gyro_write_offset(struct inv_icm42600_state *st, }
/* inv_icm42600_gyro_calibbias: min - step - max in nano */ - min = (int64_t)inv_icm42600_gyro_calibbias[0] * 1000000000LL + - (int64_t)inv_icm42600_gyro_calibbias[1]; - max = (int64_t)inv_icm42600_gyro_calibbias[4] * 1000000000LL + - (int64_t)inv_icm42600_gyro_calibbias[5]; - val64 = (int64_t)val * 1000000000LL + (int64_t)val2; + min = (s64)inv_icm42600_gyro_calibbias[0] * 1000000000LL + + (s64)inv_icm42600_gyro_calibbias[1]; + max = (s64)inv_icm42600_gyro_calibbias[4] * 1000000000LL + + (s64)inv_icm42600_gyro_calibbias[5]; + val64 = (s64)val * 1000000000LL + (s64)val2; if (val64 < min || val64 > max) return -EINVAL;
@@ -549,7 +549,7 @@ static int inv_icm42600_gyro_read_raw(struct iio_dev *indio_dev, int *val, int *val2, long mask) { struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev); - int16_t data; + s16 data; int ret;
switch (chan->type) { @@ -764,10 +764,11 @@ int inv_icm42600_gyro_parse_fifo(struct iio_dev *indio_dev) ssize_t i, size; unsigned int no; const void *accel, *gyro, *timestamp; - const int8_t *temp; + const s8 *temp; unsigned int odr; - int64_t ts_val; - struct inv_icm42600_gyro_buffer buffer; + s64 ts_val; + /* buffer is copied to userspace, zeroing it to avoid any data leak */ + struct inv_icm42600_gyro_buffer buffer = { };
/* parse all fifo packets */ for (i = 0, no = 0; i < st->fifo.count; i += size, ++no) { @@ -786,8 +787,6 @@ int inv_icm42600_gyro_parse_fifo(struct iio_dev *indio_dev) inv_sensors_timestamp_apply_odr(ts, st->fifo.period, st->fifo.nb.total, no);
- /* buffer is copied to userspace, zeroing it to avoid any data leak */ - memset(&buffer, 0, sizeof(buffer)); memcpy(&buffer.gyro, gyro, sizeof(buffer.gyro)); /* convert 8 bits FIFO temperature in high resolution format */ buffer.temp = temp ? (*temp * 64) : 0; diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c index 91f0f381082b..51430b4f5e51 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c @@ -13,7 +13,7 @@ #include "inv_icm42600.h" #include "inv_icm42600_temp.h"
-static int inv_icm42600_temp_read(struct inv_icm42600_state *st, int16_t *temp) +static int inv_icm42600_temp_read(struct inv_icm42600_state *st, s16 *temp) { struct device *dev = regmap_get_device(st->map); __be16 *raw; @@ -31,9 +31,13 @@ static int inv_icm42600_temp_read(struct inv_icm42600_state *st, int16_t *temp) if (ret) goto exit;
- *temp = (int16_t)be16_to_cpup(raw); + *temp = (s16)be16_to_cpup(raw); + /* + * Temperature data is invalid if both accel and gyro are off. + * Return -EBUSY in this case. + */ if (*temp == INV_ICM42600_DATA_INVALID) - ret = -EINVAL; + ret = -EBUSY;
exit: mutex_unlock(&st->lock); @@ -48,7 +52,7 @@ int inv_icm42600_temp_read_raw(struct iio_dev *indio_dev, int *val, int *val2, long mask) { struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev); - int16_t temp; + s16 temp; int ret;
if (chan->type != IIO_TEMP) diff --git a/drivers/iio/light/as73211.c b/drivers/iio/light/as73211.c index c1f9604c2714..166874c68ff6 100644 --- a/drivers/iio/light/as73211.c +++ b/drivers/iio/light/as73211.c @@ -573,7 +573,7 @@ static irqreturn_t as73211_trigger_handler(int irq __always_unused, void *p) struct { __le16 chan[4]; s64 ts __aligned(8); - } scan; + } scan = { }; int data_result, ret;
mutex_lock(&data->mutex); diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c index 84f6b333c919..8d9e8a1c94c4 100644 --- a/drivers/iio/pressure/bmp280-core.c +++ b/drivers/iio/pressure/bmp280-core.c @@ -2152,11 +2152,12 @@ int bmp280_common_probe(struct device *dev,
/* Bring chip out of reset if there is an assigned GPIO line */ gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(gpiod)) + return dev_err_probe(dev, PTR_ERR(gpiod), "failed to get reset GPIO\n"); + /* Deassert the signal */ - if (gpiod) { - dev_info(dev, "release reset\n"); - gpiod_set_value(gpiod, 0); - } + dev_info(dev, "release reset\n"); + gpiod_set_value(gpiod, 0);
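The bmp280 hunk is the standard pattern for optional GPIOs: devm_gpiod_get_optional() returns NULL when no reset line is described, or an ERR_PTR on a real error, which dev_err_probe() turns into a proper probe failure (including -EPROBE_DEFER handling); gpiod_set_value() is a no-op on a NULL descriptor, so no extra check is needed before deasserting. A condensed sketch of that pattern (my_release_reset is a hypothetical helper name):

/* Sketch of the optional-GPIO probe pattern used above. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int my_release_reset(struct device *dev)
{
    struct gpio_desc *reset;

    reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
    if (IS_ERR(reset))
        return dev_err_probe(dev, PTR_ERR(reset),
                             "failed to get reset GPIO\n");

    /* NULL descriptor (no GPIO described) is fine: this is a no-op then */
    gpiod_set_value(reset, 0);
    return 0;
}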
data->regmap = regmap;
diff --git a/drivers/iio/proximity/isl29501.c b/drivers/iio/proximity/isl29501.c index bcebacaf3dab..9fd3d2e8cea6 100644 --- a/drivers/iio/proximity/isl29501.c +++ b/drivers/iio/proximity/isl29501.c @@ -938,12 +938,18 @@ static irqreturn_t isl29501_trigger_handler(int irq, void *p) struct iio_dev *indio_dev = pf->indio_dev; struct isl29501_private *isl29501 = iio_priv(indio_dev); const unsigned long *active_mask = indio_dev->active_scan_mask; - u32 buffer[4] __aligned(8) = {}; /* 1x16-bit + naturally aligned ts */ - - if (test_bit(ISL29501_DISTANCE_SCAN_INDEX, active_mask)) - isl29501_register_read(isl29501, REG_DISTANCE, buffer); + u32 value; + struct { + u16 data; + aligned_s64 ts; + } scan = { }; + + if (test_bit(ISL29501_DISTANCE_SCAN_INDEX, active_mask)) { + isl29501_register_read(isl29501, REG_DISTANCE, &value); + scan.data = value; + }
- iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp); + iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp); iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED; diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c index 555a61e2f3fd..44fba61ccfe2 100644 --- a/drivers/iio/temperature/maxim_thermocouple.c +++ b/drivers/iio/temperature/maxim_thermocouple.c @@ -12,6 +12,7 @@ #include <linux/mutex.h> #include <linux/err.h> #include <linux/spi/spi.h> +#include <linux/types.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> #include <linux/iio/trigger.h> @@ -122,8 +123,15 @@ struct maxim_thermocouple_data { struct spi_device *spi; const struct maxim_thermocouple_chip *chip; char tc_type; - - u8 buffer[16] __aligned(IIO_DMA_MINALIGN); + /* Buffer for reading up to 2 hardware channels. */ + struct { + union { + __be16 raw16; + __be32 raw32; + __be16 raw[2]; + }; + aligned_s64 timestamp; + } buffer __aligned(IIO_DMA_MINALIGN); };
static int maxim_thermocouple_read(struct maxim_thermocouple_data *data, @@ -131,18 +139,16 @@ static int maxim_thermocouple_read(struct maxim_thermocouple_data *data, { unsigned int storage_bytes = data->chip->read_size; unsigned int shift = chan->scan_type.shift + (chan->address * 8); - __be16 buf16; - __be32 buf32; int ret;
switch (storage_bytes) { case 2: - ret = spi_read(data->spi, (void *)&buf16, storage_bytes); - *val = be16_to_cpu(buf16); + ret = spi_read(data->spi, &data->buffer.raw16, storage_bytes); + *val = be16_to_cpu(data->buffer.raw16); break; case 4: - ret = spi_read(data->spi, (void *)&buf32, storage_bytes); - *val = be32_to_cpu(buf32); + ret = spi_read(data->spi, &data->buffer.raw32, storage_bytes); + *val = be32_to_cpu(data->buffer.raw32); break; default: ret = -EINVAL; @@ -167,9 +173,9 @@ static irqreturn_t maxim_thermocouple_trigger_handler(int irq, void *private) struct maxim_thermocouple_data *data = iio_priv(indio_dev); int ret;
- ret = spi_read(data->spi, data->buffer, data->chip->read_size); + ret = spi_read(data->spi, data->buffer.raw, data->chip->read_size); if (!ret) { - iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, + iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer, iio_get_time_ns(indio_dev)); }
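The maxim_thermocouple rework replaces per-call stack buffers with a single buffer embedded in the driver state, marked __aligned(IIO_DMA_MINALIGN) so SPI transfers can safely DMA into it without sharing a cacheline with unrelated fields, and carrying an aligned_s64 timestamp so the same struct can be handed straight to iio_push_to_buffers_with_timestamp(). A condensed sketch of that layout; the field names are illustrative:

/* Sketch of a DMA-safe IIO scan buffer embedded in driver state. */
#include <linux/iio/iio.h>
#include <linux/types.h>

struct my_state {
    /* ... device handles and other driver state ... */

    /* own cacheline: safe target for SPI DMA, timestamp 8-byte aligned */
    struct {
        __be32 raw;
        aligned_s64 timestamp;
    } scan __aligned(IIO_DMA_MINALIGN);
};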
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 6d1dbc978759..a94723a12bb4 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -1412,10 +1412,11 @@ static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
};
-static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh, - struct netlink_ext_ack *extack, - enum rdma_restrack_type res_type, - res_fill_func_t fill_func) +static noinline_for_stack int +res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack, + enum rdma_restrack_type res_type, + res_fill_func_t fill_func) { const struct nldev_fill_res_entry *fe = &fill_entries[res_type]; struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; @@ -2153,10 +2154,10 @@ static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh, return ret; }
-static int stat_get_doit_default_counter(struct sk_buff *skb, - struct nlmsghdr *nlh, - struct netlink_ext_ack *extack, - struct nlattr *tb[]) +static noinline_for_stack int +stat_get_doit_default_counter(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack, + struct nlattr *tb[]) { struct rdma_hw_stats *stats; struct nlattr *table_attr; @@ -2246,8 +2247,9 @@ static int stat_get_doit_default_counter(struct sk_buff *skb, return ret; }
-static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh, - struct netlink_ext_ack *extack, struct nlattr *tb[]) +static noinline_for_stack int +stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack, struct nlattr *tb[])
{ static enum rdma_nl_counter_mode mode; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index f7345e4890a1..31fff5885f1a 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -1823,7 +1823,6 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr, struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq); struct bnxt_re_dev *rdev = srq->rdev; - int rc;
switch (srq_attr_mask) { case IB_SRQ_MAX_WR: @@ -1835,11 +1834,8 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr, return -EINVAL;
srq->qplib_srq.threshold = srq_attr->srq_limit; - rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq); - if (rc) { - ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!"); - return rc; - } + bnxt_qplib_srq_arm_db(&srq->qplib_srq.dbinfo, srq->qplib_srq.threshold); + /* On success, update the shadow */ srq->srq_limit = srq_attr->srq_limit; /* No need to Build and send response back to udata */ diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 68ea4ed0b171..c19dd732c235 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -685,9 +685,7 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res, srq->dbinfo.db = srq->dpi->dbr; srq->dbinfo.max_slot = 1; srq->dbinfo.priv_db = res->dpi_tbl.priv_db; - if (srq->threshold) - bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA); - srq->arm_req = false; + bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
return 0; fail: @@ -697,24 +695,6 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res, return rc; }
-int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res, - struct bnxt_qplib_srq *srq) -{ - struct bnxt_qplib_hwq *srq_hwq = &srq->hwq; - u32 count; - - count = __bnxt_qplib_get_avail(srq_hwq); - if (count > srq->threshold) { - srq->arm_req = false; - bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold); - } else { - /* Deferred arming */ - srq->arm_req = true; - } - - return 0; -} - int bnxt_qplib_query_srq(struct bnxt_qplib_res *res, struct bnxt_qplib_srq *srq) { @@ -756,7 +736,6 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq, struct bnxt_qplib_hwq *srq_hwq = &srq->hwq; struct rq_wqe *srqe; struct sq_sge *hw_sge; - u32 count = 0; int i, next;
spin_lock(&srq_hwq->lock); @@ -788,15 +767,8 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);
- spin_lock(&srq_hwq->lock); - count = __bnxt_qplib_get_avail(srq_hwq); - spin_unlock(&srq_hwq->lock); /* Ring DB */ bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ); - if (srq->arm_req == true && count > srq->threshold) { - srq->arm_req = false; - bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold); - }
return 0; } diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h index 55fd840359ef..288196facfd7 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h @@ -519,8 +519,6 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq, srqn_handler_t srq_handler); int bnxt_qplib_create_srq(struct bnxt_qplib_res *res, struct bnxt_qplib_srq *srq); -int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res, - struct bnxt_qplib_srq *srq); int bnxt_qplib_query_srq(struct bnxt_qplib_res *res, struct bnxt_qplib_srq *srq); void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c index 96ceec1e8199..77da7cf34427 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c @@ -121,6 +121,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res, pbl->pg_arr = vmalloc_array(pages, sizeof(void *)); if (!pbl->pg_arr) return -ENOMEM; + memset(pbl->pg_arr, 0, pages * sizeof(void *));
pbl->pg_map_arr = vmalloc_array(pages, sizeof(dma_addr_t)); if (!pbl->pg_map_arr) { @@ -128,6 +129,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res, pbl->pg_arr = NULL; return -ENOMEM; } + memset(pbl->pg_map_arr, 0, pages * sizeof(dma_addr_t)); pbl->pg_count = 0; pbl->pg_size = sginfo->pgsize;
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c index 29ad2f5ffabe..e990690d8b3c 100644 --- a/drivers/infiniband/hw/erdma/erdma_verbs.c +++ b/drivers/infiniband/hw/erdma/erdma_verbs.c @@ -979,7 +979,9 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs, if (ret) goto err_out_cmd; } else { - init_kernel_qp(dev, qp, attrs); + ret = init_kernel_qp(dev, qp, attrs); + if (ret) + goto err_out_xa; }
qp->attrs.max_send_sge = attrs->cap.max_send_sge; diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c index bbc957c578e1..e5db39f4720d 100644 --- a/drivers/infiniband/hw/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c @@ -964,31 +964,35 @@ static void find_hw_thread_mask(uint hw_thread_no, cpumask_var_t hw_thread_mask, struct hfi1_affinity_node_list *affinity) { int possible, curr_cpu, i; - uint num_cores_per_socket = node_affinity.num_online_cpus / + uint num_cores_per_socket; + + cpumask_copy(hw_thread_mask, &affinity->proc.mask); + + if (affinity->num_core_siblings == 0) + return; + + num_cores_per_socket = node_affinity.num_online_cpus / affinity->num_core_siblings / node_affinity.num_online_nodes;
- cpumask_copy(hw_thread_mask, &affinity->proc.mask); - if (affinity->num_core_siblings > 0) { - /* Removing other siblings not needed for now */ - possible = cpumask_weight(hw_thread_mask); - curr_cpu = cpumask_first(hw_thread_mask); - for (i = 0; - i < num_cores_per_socket * node_affinity.num_online_nodes; - i++) - curr_cpu = cpumask_next(curr_cpu, hw_thread_mask); - - for (; i < possible; i++) { - cpumask_clear_cpu(curr_cpu, hw_thread_mask); - curr_cpu = cpumask_next(curr_cpu, hw_thread_mask); - } + /* Removing other siblings not needed for now */ + possible = cpumask_weight(hw_thread_mask); + curr_cpu = cpumask_first(hw_thread_mask); + for (i = 0; + i < num_cores_per_socket * node_affinity.num_online_nodes; + i++) + curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
- /* Identifying correct HW threads within physical cores */ - cpumask_shift_left(hw_thread_mask, hw_thread_mask, - num_cores_per_socket * - node_affinity.num_online_nodes * - hw_thread_no); + for (; i < possible; i++) { + cpumask_clear_cpu(curr_cpu, hw_thread_mask); + curr_cpu = cpumask_next(curr_cpu, hw_thread_mask); } + + /* Identifying correct HW threads within physical cores */ + cpumask_shift_left(hw_thread_mask, hw_thread_mask, + num_cores_per_socket * + node_affinity.num_online_nodes * + hw_thread_no); }
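The hfi1 affinity change is largely a control-flow cleanup: copy the default mask, then bail out early when there are no core siblings, which also avoids performing the division by num_core_siblings in that case, instead of wrapping the whole body in an if block. The general guard-clause shape, with hypothetical names (struct ctx and the two helpers are stand-ins):

/* Sketch of the early-return (guard clause) shape; names are hypothetical. */
struct ctx {
    unsigned int num_core_siblings;
};

static void copy_default_mask(struct ctx *c);
static void trim_sibling_threads(struct ctx *c);

static void build_mask(struct ctx *c)
{
    copy_default_mask(c);           /* always done */

    if (c->num_core_siblings == 0)
        return;                     /* nothing further to trim */

    /* heavy per-sibling trimming continues un-indented here */
    trim_sibling_threads(c);
}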
int hfi1_get_proc_affinity(int node) diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c index feae920784be..03e3f6668840 100644 --- a/drivers/infiniband/sw/siw/siw_qp_tx.c +++ b/drivers/infiniband/sw/siw/siw_qp_tx.c @@ -340,18 +340,17 @@ static int siw_tcp_sendpages(struct socket *s, struct page **page, int offset, if (!sendpage_ok(page[i])) msg.msg_flags &= ~MSG_SPLICE_PAGES; bvec_set_page(&bvec, page[i], bytes, offset); - iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size); + iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, bytes);
try_page_again: lock_sock(sk); - rv = tcp_sendmsg_locked(sk, &msg, size); + rv = tcp_sendmsg_locked(sk, &msg, bytes); release_sock(sk);
if (rv > 0) { size -= rv; sent += rv; if (rv != bytes) { - offset += rv; bytes -= rv; goto try_page_again; } diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 2e7a12f30651..431cea41df2a 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -3625,7 +3625,7 @@ static int __init parse_ivrs_acpihid(char *str) { u32 seg = 0, bus, dev, fn; char *hid, *uid, *p, *addr; - char acpiid[ACPIID_LEN] = {0}; + char acpiid[ACPIID_LEN + 1] = { }; /* size with NULL terminator */ int i;
addr = strchr(str, '@'); @@ -3651,7 +3651,7 @@ static int __init parse_ivrs_acpihid(char *str) /* We have the '@', make it the terminator to get just the acpiid */ *addr++ = 0;
- if (strlen(str) > ACPIID_LEN + 1) + if (strlen(str) > ACPIID_LEN) goto not_found;
if (sscanf(str, "=%s", acpiid) != 1) diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c index e6b4bab0dde2..3d0313669796 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c @@ -255,6 +255,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = { { .compatible = "qcom,sdm670-mdss" }, { .compatible = "qcom,sdm845-mdss" }, { .compatible = "qcom,sdm845-mss-pil" }, + { .compatible = "qcom,sm6115-mdss" }, { .compatible = "qcom,sm6350-mdss" }, { .compatible = "qcom,sm6375-mdss" }, { .compatible = "qcom,sm8150-mdss" }, diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c index e76b22939994..f058405c5fbb 100644 --- a/drivers/iommu/iommufd/io_pagetable.c +++ b/drivers/iommu/iommufd/io_pagetable.c @@ -69,36 +69,45 @@ struct iopt_area *iopt_area_contig_next(struct iopt_area_contig_iter *iter) return iter->area; }
-static bool __alloc_iova_check_hole(struct interval_tree_double_span_iter *span, - unsigned long length, - unsigned long iova_alignment, - unsigned long page_offset) +static bool __alloc_iova_check_range(unsigned long *start, unsigned long last, + unsigned long length, + unsigned long iova_alignment, + unsigned long page_offset) { - if (span->is_used || span->last_hole - span->start_hole < length - 1) + unsigned long aligned_start; + + /* ALIGN_UP() */ + if (check_add_overflow(*start, iova_alignment - 1, &aligned_start)) return false; + aligned_start &= ~(iova_alignment - 1); + aligned_start |= page_offset;
- span->start_hole = ALIGN(span->start_hole, iova_alignment) | - page_offset; - if (span->start_hole > span->last_hole || - span->last_hole - span->start_hole < length - 1) + if (aligned_start >= last || last - aligned_start < length - 1) return false; + *start = aligned_start; return true; }
-static bool __alloc_iova_check_used(struct interval_tree_span_iter *span, +static bool __alloc_iova_check_hole(struct interval_tree_double_span_iter *span, unsigned long length, unsigned long iova_alignment, unsigned long page_offset) { - if (span->is_hole || span->last_used - span->start_used < length - 1) + if (span->is_used) return false; + return __alloc_iova_check_range(&span->start_hole, span->last_hole, + length, iova_alignment, page_offset); +}
- span->start_used = ALIGN(span->start_used, iova_alignment) | - page_offset; - if (span->start_used > span->last_used || - span->last_used - span->start_used < length - 1) +static bool __alloc_iova_check_used(struct interval_tree_span_iter *span, + unsigned long length, + unsigned long iova_alignment, + unsigned long page_offset) +{ + if (span->is_hole) return false; - return true; + return __alloc_iova_check_range(&span->start_used, span->last_used, + length, iova_alignment, page_offset); }
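The iommufd rework centralizes the hole/used-range checks and, more importantly, replaces the plain ALIGN() with an overflow-checked round-up: ALIGN(x, a) computes (x + a - 1) & ~(a - 1), which silently wraps when x is near ULONG_MAX, so the new helper performs the addition with an overflow check first. A standalone illustration of the difference using the compiler builtin:

/* Overflow-checked align-up, as opposed to a plain ALIGN() that can wrap. */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

static bool align_up_checked(unsigned long x, unsigned long a, unsigned long *out)
{
    unsigned long tmp;

    if (__builtin_add_overflow(x, a - 1, &tmp))
        return false;           /* would wrap past ULONG_MAX */

    *out = tmp & ~(a - 1);
    return true;
}

int main(void)
{
    unsigned long v;

    printf("%d\n", align_up_checked(ULONG_MAX - 2, 4096, &v));  /* 0: rejected */
    if (align_up_checked(10000, 4096, &v))
        printf("%lu\n", v);                                     /* 12288 */
    return 0;
}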
/* @@ -524,8 +533,10 @@ static int iopt_unmap_iova_range(struct io_pagetable *iopt, unsigned long start, iommufd_access_notify_unmap(iopt, area_first, length); /* Something is not responding to unmap requests. */ tries++; - if (WARN_ON(tries > 100)) - return -EDEADLOCK; + if (WARN_ON(tries > 100)) { + rc = -EDEADLOCK; + goto out_unmapped; + } goto again; }
@@ -547,6 +558,7 @@ static int iopt_unmap_iova_range(struct io_pagetable *iopt, unsigned long start, out_unlock_iova: up_write(&iopt->iova_rwsem); up_read(&iopt->domains_rwsem); +out_unmapped: if (unmapped) *unmapped = unmapped_bytes; return rc; diff --git a/drivers/leds/flash/leds-qcom-flash.c b/drivers/leds/flash/leds-qcom-flash.c index 17391aefeb94..a619dbe01524 100644 --- a/drivers/leds/flash/leds-qcom-flash.c +++ b/drivers/leds/flash/leds-qcom-flash.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. */
#include <linux/bitfield.h> @@ -14,6 +14,9 @@ #include <media/v4l2-flash-led-class.h>
/* registers definitions */ +#define FLASH_REVISION_REG 0x00 +#define FLASH_4CH_REVISION_V0P1 0x01 + #define FLASH_TYPE_REG 0x04 #define FLASH_TYPE_VAL 0x18
@@ -73,6 +76,16 @@
#define UA_PER_MA 1000
+/* thermal threshold constants */ +#define OTST_3CH_MIN_VAL 3 +#define OTST1_4CH_MIN_VAL 0 +#define OTST1_4CH_V0P1_MIN_VAL 3 +#define OTST2_4CH_MIN_VAL 0 + +#define OTST1_MAX_CURRENT_MA 1000 +#define OTST2_MAX_CURRENT_MA 500 +#define OTST3_MAX_CURRENT_MA 200 + enum hw_type { QCOM_MVFLASH_3CH, QCOM_MVFLASH_4CH, @@ -98,10 +111,13 @@ enum { REG_IRESOLUTION, REG_CHAN_STROBE, REG_CHAN_EN, + REG_THERM_THRSH1, + REG_THERM_THRSH2, + REG_THERM_THRSH3, REG_MAX_COUNT, };
-static struct reg_field mvflash_3ch_regs[REG_MAX_COUNT] = { +static const struct reg_field mvflash_3ch_regs[REG_MAX_COUNT] = { REG_FIELD(0x08, 0, 7), /* status1 */ REG_FIELD(0x09, 0, 7), /* status2 */ REG_FIELD(0x0a, 0, 7), /* status3 */ @@ -111,9 +127,12 @@ static struct reg_field mvflash_3ch_regs[REG_MAX_COUNT] = { REG_FIELD(0x47, 0, 5), /* iresolution */ REG_FIELD_ID(0x49, 0, 2, 3, 1), /* chan_strobe */ REG_FIELD(0x4c, 0, 2), /* chan_en */ + REG_FIELD(0x56, 0, 2), /* therm_thrsh1 */ + REG_FIELD(0x57, 0, 2), /* therm_thrsh2 */ + REG_FIELD(0x58, 0, 2), /* therm_thrsh3 */ };
-static struct reg_field mvflash_4ch_regs[REG_MAX_COUNT] = { +static const struct reg_field mvflash_4ch_regs[REG_MAX_COUNT] = { REG_FIELD(0x06, 0, 7), /* status1 */ REG_FIELD(0x07, 0, 6), /* status2 */ REG_FIELD(0x09, 0, 7), /* status3 */ @@ -123,6 +142,8 @@ static struct reg_field mvflash_4ch_regs[REG_MAX_COUNT] = { REG_FIELD(0x49, 0, 3), /* iresolution */ REG_FIELD_ID(0x4a, 0, 6, 4, 1), /* chan_strobe */ REG_FIELD(0x4e, 0, 3), /* chan_en */ + REG_FIELD(0x7a, 0, 2), /* therm_thrsh1 */ + REG_FIELD(0x78, 0, 2), /* therm_thrsh2 */ };
struct qcom_flash_data { @@ -130,9 +151,11 @@ struct qcom_flash_data { struct regmap_field *r_fields[REG_MAX_COUNT]; struct mutex lock; enum hw_type hw_type; + u32 total_ma; u8 leds_count; u8 max_channels; u8 chan_en_bits; + u8 revision; };
struct qcom_flash_led { @@ -143,6 +166,7 @@ struct qcom_flash_led { u32 max_timeout_ms; u32 flash_current_ma; u32 flash_timeout_ms; + u32 current_in_use_ma; u8 *chan_id; u8 chan_count; bool enabled; @@ -172,6 +196,127 @@ static int set_flash_module_en(struct qcom_flash_led *led, bool en) return rc; }
+static int update_allowed_flash_current(struct qcom_flash_led *led, u32 *current_ma, bool strobe) +{ + struct qcom_flash_data *flash_data = led->flash_data; + u32 therm_ma, avail_ma, thrsh[3], min_thrsh, sts; + int rc = 0; + + mutex_lock(&flash_data->lock); + /* + * Put previously allocated current into allowed budget in either of these two cases: + * 1) LED is disabled; + * 2) LED is enabled repeatedly + */ + if (!strobe || led->current_in_use_ma != 0) { + if (flash_data->total_ma >= led->current_in_use_ma) + flash_data->total_ma -= led->current_in_use_ma; + else + flash_data->total_ma = 0; + + led->current_in_use_ma = 0; + if (!strobe) + goto unlock; + } + + /* + * Cache the default thermal threshold settings, and set them to the lowest levels before + * reading over-temp real time status. If over-temp has been triggered at the lowest + * threshold, it's very likely that it would be triggered at a higher (default) threshold + * when more flash current is requested. Prevent device from triggering over-temp condition + * by limiting the flash current for the new request. + */ + rc = regmap_field_read(flash_data->r_fields[REG_THERM_THRSH1], &thrsh[0]); + if (rc < 0) + goto unlock; + + rc = regmap_field_read(flash_data->r_fields[REG_THERM_THRSH2], &thrsh[1]); + if (rc < 0) + goto unlock; + + if (flash_data->hw_type == QCOM_MVFLASH_3CH) { + rc = regmap_field_read(flash_data->r_fields[REG_THERM_THRSH3], &thrsh[2]); + if (rc < 0) + goto unlock; + } + + min_thrsh = OTST_3CH_MIN_VAL; + if (flash_data->hw_type == QCOM_MVFLASH_4CH) + min_thrsh = (flash_data->revision == FLASH_4CH_REVISION_V0P1) ? + OTST1_4CH_V0P1_MIN_VAL : OTST1_4CH_MIN_VAL; + + rc = regmap_field_write(flash_data->r_fields[REG_THERM_THRSH1], min_thrsh); + if (rc < 0) + goto unlock; + + if (flash_data->hw_type == QCOM_MVFLASH_4CH) + min_thrsh = OTST2_4CH_MIN_VAL; + + /* + * The default thermal threshold settings have been updated hence + * restore them if any fault happens starting from here. 
+ */ + rc = regmap_field_write(flash_data->r_fields[REG_THERM_THRSH2], min_thrsh); + if (rc < 0) + goto restore; + + if (flash_data->hw_type == QCOM_MVFLASH_3CH) { + rc = regmap_field_write(flash_data->r_fields[REG_THERM_THRSH3], min_thrsh); + if (rc < 0) + goto restore; + } + + /* Read thermal level status to get corresponding derating flash current */ + rc = regmap_field_read(flash_data->r_fields[REG_STATUS2], &sts); + if (rc) + goto restore; + + therm_ma = FLASH_TOTAL_CURRENT_MAX_UA / 1000; + if (flash_data->hw_type == QCOM_MVFLASH_3CH) { + if (sts & FLASH_STS_3CH_OTST3) + therm_ma = OTST3_MAX_CURRENT_MA; + else if (sts & FLASH_STS_3CH_OTST2) + therm_ma = OTST2_MAX_CURRENT_MA; + else if (sts & FLASH_STS_3CH_OTST1) + therm_ma = OTST1_MAX_CURRENT_MA; + } else { + if (sts & FLASH_STS_4CH_OTST2) + therm_ma = OTST2_MAX_CURRENT_MA; + else if (sts & FLASH_STS_4CH_OTST1) + therm_ma = OTST1_MAX_CURRENT_MA; + } + + /* Calculate the allowed flash current for the request */ + if (therm_ma <= flash_data->total_ma) + avail_ma = 0; + else + avail_ma = therm_ma - flash_data->total_ma; + + *current_ma = min_t(u32, *current_ma, avail_ma); + led->current_in_use_ma = *current_ma; + flash_data->total_ma += led->current_in_use_ma; + + dev_dbg(led->flash.led_cdev.dev, "allowed flash current: %dmA, total current: %dmA\n", + led->current_in_use_ma, flash_data->total_ma); + +restore: + /* Restore to default thermal threshold settings */ + rc = regmap_field_write(flash_data->r_fields[REG_THERM_THRSH1], thrsh[0]); + if (rc < 0) + goto unlock; + + rc = regmap_field_write(flash_data->r_fields[REG_THERM_THRSH2], thrsh[1]); + if (rc < 0) + goto unlock; + + if (flash_data->hw_type == QCOM_MVFLASH_3CH) + rc = regmap_field_write(flash_data->r_fields[REG_THERM_THRSH3], thrsh[2]); + +unlock: + mutex_unlock(&flash_data->lock); + return rc; +} + static int set_flash_current(struct qcom_flash_led *led, u32 current_ma, enum led_mode mode) { struct qcom_flash_data *flash_data = led->flash_data; @@ -313,6 +458,10 @@ static int qcom_flash_strobe_set(struct led_classdev_flash *fled_cdev, bool stat if (rc) return rc;
+ rc = update_allowed_flash_current(led, &led->flash_current_ma, state); + if (rc < 0) + return rc; + rc = set_flash_current(led, led->flash_current_ma, FLASH_MODE); if (rc) return rc; @@ -429,6 +578,10 @@ static int qcom_flash_led_brightness_set(struct led_classdev *led_cdev, if (rc) return rc;
+ rc = update_allowed_flash_current(led, &current_ma, enable); + if (rc < 0) + return rc; + rc = set_flash_current(led, current_ma, TORCH_MODE); if (rc) return rc; @@ -702,11 +855,25 @@ static int qcom_flash_led_probe(struct platform_device *pdev) if (val == FLASH_SUBTYPE_3CH_PM8150_VAL || val == FLASH_SUBTYPE_3CH_PMI8998_VAL) { flash_data->hw_type = QCOM_MVFLASH_3CH; flash_data->max_channels = 3; - regs = mvflash_3ch_regs; + regs = devm_kmemdup(dev, mvflash_3ch_regs, sizeof(mvflash_3ch_regs), + GFP_KERNEL); + if (!regs) + return -ENOMEM; } else if (val == FLASH_SUBTYPE_4CH_VAL) { flash_data->hw_type = QCOM_MVFLASH_4CH; flash_data->max_channels = 4; - regs = mvflash_4ch_regs; + regs = devm_kmemdup(dev, mvflash_4ch_regs, sizeof(mvflash_4ch_regs), + GFP_KERNEL); + if (!regs) + return -ENOMEM; + + rc = regmap_read(regmap, reg_base + FLASH_REVISION_REG, &val); + if (rc < 0) { + dev_err(dev, "Failed to read flash LED module revision, rc=%d\n", rc); + return rc; + } + + flash_data->revision = val; } else { dev_err(dev, "flash LED subtype %#x is not yet supported\n", val); return -ENODEV; @@ -720,6 +887,7 @@ static int qcom_flash_led_probe(struct platform_device *pdev) dev_err(dev, "Failed to allocate regmap field, rc=%d\n", rc); return rc; } + devm_kfree(dev, regs); /* devm_regmap_field_bulk_alloc() makes copies */
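Taken together, the leds-qcom-flash hunks above add a per-module current budget: before a strobe the driver drops the thermal thresholds to their minimum, reads the over-temperature status bits, derives a total current ceiling, and grants the new request only what is left after the current already committed to other channels; the grant is returned to the pool when the LED is turned off. The accounting itself reduces to a few lines, roughly (simplified, illustrative variable names):

    /* under flash_data->lock */
    avail_ma   = (therm_ma > total_ma) ? therm_ma - total_ma : 0;
    granted_ma = min(requested_ma, avail_ma);
    total_ma  += granted_ma;         /* handed back when the channel is disabled */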
platform_set_drvdata(pdev, flash_data); mutex_init(&flash_data->lock); diff --git a/drivers/leds/leds-lp50xx.c b/drivers/leds/leds-lp50xx.c index 68c4d9967d68..182a590b0267 100644 --- a/drivers/leds/leds-lp50xx.c +++ b/drivers/leds/leds-lp50xx.c @@ -486,6 +486,7 @@ static int lp50xx_probe_dt(struct lp50xx *priv) }
fwnode_for_each_child_node(child, led_node) { + int multi_index; ret = fwnode_property_read_u32(led_node, "color", &color_id); if (ret) { @@ -493,8 +494,16 @@ static int lp50xx_probe_dt(struct lp50xx *priv) dev_err(priv->dev, "Cannot read color\n"); goto child_out; } + ret = fwnode_property_read_u32(led_node, "reg", &multi_index); + if (ret != 0) { + dev_err(priv->dev, "reg must be set\n"); + return -EINVAL; + } else if (multi_index >= LP50XX_LEDS_PER_MODULE) { + dev_err(priv->dev, "reg %i out of range\n", multi_index); + return -EINVAL; + }
- mc_led_info[num_colors].color_index = color_id; + mc_led_info[multi_index].color_index = color_id; num_colors++; }
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c index f8912fa60c49..79719fc8a08f 100644 --- a/drivers/leds/trigger/ledtrig-netdev.c +++ b/drivers/leds/trigger/ledtrig-netdev.c @@ -54,7 +54,6 @@ struct led_netdev_data { unsigned int last_activity;
unsigned long mode; - unsigned long blink_delay; int link_speed; u8 duplex;
@@ -70,10 +69,6 @@ static void set_baseline_state(struct led_netdev_data *trigger_data) /* Already validated, hw control is possible with the requested mode */ if (trigger_data->hw_control) { led_cdev->hw_control_set(led_cdev, trigger_data->mode); - if (led_cdev->blink_set) { - led_cdev->blink_set(led_cdev, &trigger_data->blink_delay, - &trigger_data->blink_delay); - }
return; } @@ -391,11 +386,10 @@ static ssize_t interval_store(struct device *dev, size_t size) { struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev); - struct led_classdev *led_cdev = trigger_data->led_cdev; unsigned long value; int ret;
- if (trigger_data->hw_control && !led_cdev->blink_set) + if (trigger_data->hw_control) return -EINVAL;
ret = kstrtoul(buf, 0, &value); @@ -404,13 +398,9 @@ static ssize_t interval_store(struct device *dev,
/* impose some basic bounds on the timer interval */ if (value >= 5 && value <= 10000) { - if (trigger_data->hw_control) { - trigger_data->blink_delay = value; - } else { - cancel_delayed_work_sync(&trigger_data->work); + cancel_delayed_work_sync(&trigger_data->work);
- atomic_set(&trigger_data->interval, msecs_to_jiffies(value)); - } + atomic_set(&trigger_data->interval, msecs_to_jiffies(value)); set_baseline_state(trigger_data); /* resets timer */ }
diff --git a/drivers/md/dm-ps-historical-service-time.c b/drivers/md/dm-ps-historical-service-time.c index b49e10d76d03..2c8626a83de4 100644 --- a/drivers/md/dm-ps-historical-service-time.c +++ b/drivers/md/dm-ps-historical-service-time.c @@ -541,8 +541,10 @@ static int __init dm_hst_init(void) { int r = dm_register_path_selector(&hst_ps);
- if (r < 0) + if (r < 0) { DMERR("register failed %d", r); + return r; + }
DMINFO("version " HST_VERSION " loaded");
diff --git a/drivers/md/dm-ps-queue-length.c b/drivers/md/dm-ps-queue-length.c index e305f05ad1e5..eb543e6431e0 100644 --- a/drivers/md/dm-ps-queue-length.c +++ b/drivers/md/dm-ps-queue-length.c @@ -260,8 +260,10 @@ static int __init dm_ql_init(void) { int r = dm_register_path_selector(&ql_ps);
- if (r < 0) + if (r < 0) { DMERR("register failed %d", r); + return r; + }
DMINFO("version " QL_VERSION " loaded");
diff --git a/drivers/md/dm-ps-round-robin.c b/drivers/md/dm-ps-round-robin.c index 0f04b673597a..62ac820125cb 100644 --- a/drivers/md/dm-ps-round-robin.c +++ b/drivers/md/dm-ps-round-robin.c @@ -220,8 +220,10 @@ static int __init dm_rr_init(void) { int r = dm_register_path_selector(&rr_ps);
- if (r < 0) + if (r < 0) { DMERR("register failed %d", r); + return r; + }
DMINFO("version " RR_VERSION " loaded");
diff --git a/drivers/md/dm-ps-service-time.c b/drivers/md/dm-ps-service-time.c index 969d31c40272..f8c43aecdb27 100644 --- a/drivers/md/dm-ps-service-time.c +++ b/drivers/md/dm-ps-service-time.c @@ -341,8 +341,10 @@ static int __init dm_st_init(void) { int r = dm_register_path_selector(&st_ps);
- if (r < 0) + if (r < 0) { DMERR("register failed %d", r); + return r; + }
DMINFO("version " ST_VERSION " loaded");
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index bf2ade89c8c2..ed0a5e91968d 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -862,17 +862,17 @@ static bool dm_table_supports_dax(struct dm_table *t, return true; }
-static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) +static int device_is_not_rq_stackable(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) { struct block_device *bdev = dev->bdev; struct request_queue *q = bdev_get_queue(bdev);
/* request-based cannot stack on partitions! */ if (bdev_is_partition(bdev)) - return false; + return true;
- return queue_is_mq(q); + return !queue_is_mq(q); }
static int dm_table_determine_type(struct dm_table *t) @@ -968,7 +968,7 @@ static int dm_table_determine_type(struct dm_table *t)
/* Non-request-stackable devices can't be used for request-based dm */ if (!ti->type->iterate_devices || - !ti->type->iterate_devices(ti, device_is_rq_stackable, NULL)) { + ti->type->iterate_devices(ti, device_is_not_rq_stackable, NULL)) { DMERR("table load rejected: including non-request-stackable devices"); return -EINVAL; } diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index b487f7acc860..36e55a5bcb0d 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -1062,7 +1062,7 @@ static int dmz_iterate_devices(struct dm_target *ti, struct dmz_target *dmz = ti->private; unsigned int zone_nr_sectors = dmz_zone_nr_sectors(dmz->metadata); sector_t capacity; - int i, r; + int i, r = 0;
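The dm-table change above inverts the predicate because iterate_devices() reports whether the callback returned nonzero for any underlying device. With the positive "is request-stackable" check, one qualifying device was enough to accept the whole table; asking the negated question turns that any-match into the intended all-must-qualify test, since "all devices stackable" is the same as "no device is not stackable":

    /* any-match on the negated predicate == every device must qualify */
    if (ti->type->iterate_devices(ti, device_is_not_rq_stackable, NULL))
            return -EINVAL;          /* at least one device cannot stack */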
for (i = 0; i < dmz->nr_ddevs; i++) { capacity = dmz->dev[i].capacity & ~(zone_nr_sectors - 1); diff --git a/drivers/media/cec/usb/rainshadow/rainshadow-cec.c b/drivers/media/cec/usb/rainshadow/rainshadow-cec.c index ee870ea1a886..6f8d6797c614 100644 --- a/drivers/media/cec/usb/rainshadow/rainshadow-cec.c +++ b/drivers/media/cec/usb/rainshadow/rainshadow-cec.c @@ -171,11 +171,12 @@ static irqreturn_t rain_interrupt(struct serio *serio, unsigned char data, { struct rain *rain = serio_get_drvdata(serio);
+ spin_lock(&rain->buf_lock); if (rain->buf_len == DATA_SIZE) { + spin_unlock(&rain->buf_lock); dev_warn_once(rain->dev, "buffer overflow\n"); return IRQ_HANDLED; } - spin_lock(&rain->buf_lock); rain->buf_len++; rain->buf[rain->buf_wr_idx] = data; rain->buf_wr_idx = (rain->buf_wr_idx + 1) & 0xff; diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c index 444fe1c4bf2d..e561c7cc35bf 100644 --- a/drivers/media/dvb-frontends/dib7000p.c +++ b/drivers/media/dvb-frontends/dib7000p.c @@ -2198,6 +2198,8 @@ static int w7090p_tuner_write_serpar(struct i2c_adapter *i2c_adap, struct i2c_ms struct dib7000p_state *state = i2c_get_adapdata(i2c_adap); u8 n_overflow = 1; u16 i = 1000; + if (msg[0].len < 3) + return -EOPNOTSUPP; u16 serpar_num = msg[0].buf[0];
while (n_overflow == 1 && i) { @@ -2217,6 +2219,8 @@ static int w7090p_tuner_read_serpar(struct i2c_adapter *i2c_adap, struct i2c_msg struct dib7000p_state *state = i2c_get_adapdata(i2c_adap); u8 n_overflow = 1, n_empty = 1; u16 i = 1000; + if (msg[0].len < 1 || msg[1].len < 2) + return -EOPNOTSUPP; u16 serpar_num = msg[0].buf[0]; u16 read_word;
@@ -2261,8 +2265,12 @@ static int dib7090p_rw_on_apb(struct i2c_adapter *i2c_adap, u16 word;
if (num == 1) { /* write */ + if (msg[0].len < 3) + return -EOPNOTSUPP; dib7000p_write_word(state, apb_address, ((msg[0].buf[1] << 8) | (msg[0].buf[2]))); } else { + if (msg[1].len < 2) + return -EOPNOTSUPP; word = dib7000p_read_word(state, apb_address); msg[1].buf[0] = (word >> 8) & 0xff; msg[1].buf[1] = (word) & 0xff; diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c index 364026124257..4d31b2bb8f09 100644 --- a/drivers/media/i2c/ccs/ccs-core.c +++ b/drivers/media/i2c/ccs/ccs-core.c @@ -665,7 +665,7 @@ static int ccs_set_ctrl(struct v4l2_ctrl *ctrl) break; }
- pm_status = pm_runtime_get_if_active(&client->dev, true); + pm_status = pm_runtime_get_if_active(&client->dev); if (!pm_status) return 0;
diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c index fd56ba138739..d6ef4a249e94 100644 --- a/drivers/media/i2c/hi556.c +++ b/drivers/media/i2c/hi556.c @@ -689,21 +689,23 @@ static int hi556_test_pattern(struct hi556 *hi556, u32 pattern) int ret; u32 val;
- if (pattern) { - ret = hi556_read_reg(hi556, HI556_REG_ISP, - HI556_REG_VALUE_08BIT, &val); - if (ret) - return ret; + ret = hi556_read_reg(hi556, HI556_REG_ISP, + HI556_REG_VALUE_08BIT, &val); + if (ret) + return ret;
- ret = hi556_write_reg(hi556, HI556_REG_ISP, - HI556_REG_VALUE_08BIT, - val | HI556_REG_ISP_TPG_EN); - if (ret) - return ret; - } + val = pattern ? (val | HI556_REG_ISP_TPG_EN) : + (val & ~HI556_REG_ISP_TPG_EN); + + ret = hi556_write_reg(hi556, HI556_REG_ISP, + HI556_REG_VALUE_08BIT, val); + if (ret) + return ret; + + val = pattern ? BIT(pattern - 1) : 0;
return hi556_write_reg(hi556, HI556_REG_TEST_PATTERN, - HI556_REG_VALUE_08BIT, pattern); + HI556_REG_VALUE_08BIT, val); }
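The hi556 change above makes disabling the test pattern actually clear the TPG enable bit (previously only the enable path touched HI556_REG_ISP) and writes the pattern selector as the driver now computes it rather than the raw menu index. With the new code (values shown purely for illustration):

    val = pattern ? BIT(pattern - 1) : 0;
    /* menu entry 3 -> BIT(2) == 0x04 written to HI556_REG_TEST_PATTERN,
     * menu entry 0 -> 0 written and HI556_REG_ISP_TPG_EN cleared */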
static int hi556_set_ctrl(struct v4l2_ctrl *ctrl) diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c index 5429bd2eb053..5206784726db 100644 --- a/drivers/media/i2c/ov2659.c +++ b/drivers/media/i2c/ov2659.c @@ -1479,14 +1479,15 @@ static int ov2659_probe(struct i2c_client *client) V4L2_CID_TEST_PATTERN, ARRAY_SIZE(ov2659_test_pattern_menu) - 1, 0, 0, ov2659_test_pattern_menu); - ov2659->sd.ctrl_handler = &ov2659->ctrls;
if (ov2659->ctrls.error) { dev_err(&client->dev, "%s: control initialization error %d\n", __func__, ov2659->ctrls.error); + v4l2_ctrl_handler_free(&ov2659->ctrls); return ov2659->ctrls.error; }
+ ov2659->sd.ctrl_handler = &ov2659->ctrls; sd = &ov2659->sd; client->flags |= I2C_CLIENT_SCCB; #ifdef CONFIG_VIDEO_V4L2_SUBDEV_API diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c index c81dd4183404..8a1a33862ba7 100644 --- a/drivers/media/i2c/tc358743.c +++ b/drivers/media/i2c/tc358743.c @@ -110,7 +110,7 @@ static inline struct tc358743_state *to_state(struct v4l2_subdev *sd)
/* --------------- I2C --------------- */
-static void i2c_rd(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n) +static int i2c_rd(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n) { struct tc358743_state *state = to_state(sd); struct i2c_client *client = state->i2c_client; @@ -136,6 +136,7 @@ static void i2c_rd(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n) v4l2_err(sd, "%s: reading register 0x%x from 0x%x failed: %d\n", __func__, reg, client->addr, err); } + return err != ARRAY_SIZE(msgs); }
static void i2c_wr(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n) @@ -192,15 +193,24 @@ static void i2c_wr(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n) } }
-static noinline u32 i2c_rdreg(struct v4l2_subdev *sd, u16 reg, u32 n) +static noinline u32 i2c_rdreg_err(struct v4l2_subdev *sd, u16 reg, u32 n, + int *err) { + int error; __le32 val = 0;
- i2c_rd(sd, reg, (u8 __force *)&val, n); + error = i2c_rd(sd, reg, (u8 __force *)&val, n); + if (err) + *err = error;
return le32_to_cpu(val); }
+static inline u32 i2c_rdreg(struct v4l2_subdev *sd, u16 reg, u32 n) +{ + return i2c_rdreg_err(sd, reg, n, NULL); +} + static noinline void i2c_wrreg(struct v4l2_subdev *sd, u16 reg, u32 val, u32 n) { __le32 raw = cpu_to_le32(val); @@ -229,6 +239,13 @@ static u16 i2c_rd16(struct v4l2_subdev *sd, u16 reg) return i2c_rdreg(sd, reg, 2); }
+static int i2c_rd16_err(struct v4l2_subdev *sd, u16 reg, u16 *value) +{ + int err; + *value = i2c_rdreg_err(sd, reg, 2, &err); + return err; +} + static void i2c_wr16(struct v4l2_subdev *sd, u16 reg, u16 val) { i2c_wrreg(sd, reg, val, 2); @@ -1651,12 +1668,23 @@ static int tc358743_enum_mbus_code(struct v4l2_subdev *sd, return 0; }
+static u32 tc358743_g_colorspace(u32 code) +{ + switch (code) { + case MEDIA_BUS_FMT_RGB888_1X24: + return V4L2_COLORSPACE_SRGB; + case MEDIA_BUS_FMT_UYVY8_1X16: + return V4L2_COLORSPACE_SMPTE170M; + default: + return 0; + } +} + static int tc358743_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *format) { struct tc358743_state *state = to_state(sd); - u8 vi_rep = i2c_rd8(sd, VI_REP);
if (format->pad != 0) return -EINVAL; @@ -1666,23 +1694,7 @@ static int tc358743_get_fmt(struct v4l2_subdev *sd, format->format.height = state->timings.bt.height; format->format.field = V4L2_FIELD_NONE;
- switch (vi_rep & MASK_VOUT_COLOR_SEL) { - case MASK_VOUT_COLOR_RGB_FULL: - case MASK_VOUT_COLOR_RGB_LIMITED: - format->format.colorspace = V4L2_COLORSPACE_SRGB; - break; - case MASK_VOUT_COLOR_601_YCBCR_LIMITED: - case MASK_VOUT_COLOR_601_YCBCR_FULL: - format->format.colorspace = V4L2_COLORSPACE_SMPTE170M; - break; - case MASK_VOUT_COLOR_709_YCBCR_FULL: - case MASK_VOUT_COLOR_709_YCBCR_LIMITED: - format->format.colorspace = V4L2_COLORSPACE_REC709; - break; - default: - format->format.colorspace = 0; - break; - } + format->format.colorspace = tc358743_g_colorspace(format->format.code);
return 0; } @@ -1696,19 +1708,14 @@ static int tc358743_set_fmt(struct v4l2_subdev *sd, u32 code = format->format.code; /* is overwritten by get_fmt */ int ret = tc358743_get_fmt(sd, sd_state, format);
- format->format.code = code; + if (code == MEDIA_BUS_FMT_RGB888_1X24 || + code == MEDIA_BUS_FMT_UYVY8_1X16) + format->format.code = code; + format->format.colorspace = tc358743_g_colorspace(format->format.code);
if (ret) return ret;
- switch (code) { - case MEDIA_BUS_FMT_RGB888_1X24: - case MEDIA_BUS_FMT_UYVY8_1X16: - break; - default: - return -EINVAL; - } - if (format->which == V4L2_SUBDEV_FORMAT_TRY) return 0;
@@ -1932,8 +1939,19 @@ static int tc358743_probe_of(struct tc358743_state *state) state->pdata.refclk_hz = clk_get_rate(refclk); state->pdata.ddc5v_delay = DDC5V_DELAY_100_MS; state->pdata.enable_hdcp = false; - /* A FIFO level of 16 should be enough for 2-lane 720p60 at 594 MHz. */ - state->pdata.fifo_level = 16; + /* + * Ideally the FIFO trigger level should be set based on the input and + * output data rates, but the calculations required are buried in + * Toshiba's register settings spreadsheet. + * A value of 16 works with a 594Mbps data rate for 720p60 (using 2 + * lanes) and 1080p60 (using 4 lanes), but fails when the data rate + * is increased, or a lower pixel clock is used that result in CSI + * reading out faster than the data is arriving. + * + * A value of 374 works with both those modes at 594Mbps, and with most + * modes on 972Mbps. + */ + state->pdata.fifo_level = 374; /* * The PLL input clock is obtained by dividing refclk by pll_prd. * It must be between 6 MHz and 40 MHz, lower frequency is better. @@ -2021,6 +2039,7 @@ static int tc358743_probe(struct i2c_client *client) struct tc358743_platform_data *pdata = client->dev.platform_data; struct v4l2_subdev *sd; u16 irq_mask = MASK_HDMI_MSK | MASK_CSI_MSK; + u16 chipid; int err;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) @@ -2052,7 +2071,8 @@ static int tc358743_probe(struct i2c_client *client) sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
/* i2c access */ - if ((i2c_rd16(sd, CHIPID) & MASK_CHIPID) != 0) { + if (i2c_rd16_err(sd, CHIPID, &chipid) || + (chipid & MASK_CHIPID) != 0) { v4l2_info(sd, "not a TC358743 on address 0x%x\n", client->addr << 1); return -ENODEV; diff --git a/drivers/media/pci/intel/ivsc/mei_ace.c b/drivers/media/pci/intel/ivsc/mei_ace.c index a0491f307831..5696a951c0fb 100644 --- a/drivers/media/pci/intel/ivsc/mei_ace.c +++ b/drivers/media/pci/intel/ivsc/mei_ace.c @@ -528,6 +528,8 @@ static void mei_ace_remove(struct mei_cl_device *cldev)
ace_set_camera_owner(ace, ACE_CAMERA_IVSC);
+ mei_cldev_disable(cldev); + mutex_destroy(&ace->lock); }
diff --git a/drivers/media/pci/intel/ivsc/mei_csi.c b/drivers/media/pci/intel/ivsc/mei_csi.c index 685b2ec96071..6c16721a6226 100644 --- a/drivers/media/pci/intel/ivsc/mei_csi.c +++ b/drivers/media/pci/intel/ivsc/mei_csi.c @@ -807,6 +807,8 @@ static void mei_csi_remove(struct mei_cl_device *cldev)
pm_runtime_disable(&cldev->dev);
+ mei_cldev_disable(cldev); + mutex_destroy(&csi->lock); }
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c index 0754645d26ac..e62245c5c6fc 100644 --- a/drivers/media/platform/qcom/camss/camss.c +++ b/drivers/media/platform/qcom/camss/camss.c @@ -1660,7 +1660,7 @@ static int camss_probe(struct platform_device *pdev) ret = v4l2_device_register(camss->dev, &camss->v4l2_dev); if (ret < 0) { dev_err(dev, "Failed to register V4L2 device: %d\n", ret); - goto err_genpd_cleanup; + goto err_media_device_cleanup; }
v4l2_async_nf_init(&camss->notifier, &camss->v4l2_dev); @@ -1711,6 +1711,8 @@ static int camss_probe(struct platform_device *pdev) v4l2_device_unregister(&camss->v4l2_dev); v4l2_async_nf_cleanup(&camss->notifier); pm_runtime_disable(dev); +err_media_device_cleanup: + media_device_cleanup(&camss->media_dev); err_genpd_cleanup: camss_genpd_cleanup(camss);
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c index 47ce3365451d..64a858783c41 100644 --- a/drivers/media/platform/qcom/venus/core.c +++ b/drivers/media/platform/qcom/venus/core.c @@ -334,13 +334,13 @@ static int venus_probe(struct platform_device *pdev) INIT_DELAYED_WORK(&core->work, venus_sys_error_handler); init_waitqueue_head(&core->sys_err_done);
- ret = devm_request_threaded_irq(dev, core->irq, hfi_isr, venus_isr_thread, - IRQF_TRIGGER_HIGH | IRQF_ONESHOT, - "venus", core); + ret = hfi_create(core, &venus_core_ops); if (ret) goto err_core_put;
- ret = hfi_create(core, &venus_core_ops); + ret = devm_request_threaded_irq(dev, core->irq, hfi_isr, venus_isr_thread, + IRQF_TRIGGER_HIGH | IRQF_ONESHOT, + "venus", core); if (ret) goto err_core_put;
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h index 4a633261ece4..ba8afda6667d 100644 --- a/drivers/media/platform/qcom/venus/core.h +++ b/drivers/media/platform/qcom/venus/core.h @@ -28,6 +28,8 @@ #define VIDC_PMDOMAINS_NUM_MAX 3 #define VIDC_RESETS_NUM_MAX 2
+#define VENUS_MAX_FPS 240 + extern int venus_fw_debug;
struct freq_tbl { diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c index 0a041b4db9ef..cf0d97cbc463 100644 --- a/drivers/media/platform/qcom/venus/hfi_msgs.c +++ b/drivers/media/platform/qcom/venus/hfi_msgs.c @@ -33,8 +33,9 @@ static void event_seq_changed(struct venus_core *core, struct venus_inst *inst, struct hfi_buffer_requirements *bufreq; struct hfi_extradata_input_crop *crop; struct hfi_dpb_counts *dpb_count; + u32 ptype, rem_bytes; + u32 size_read = 0; u8 *data_ptr; - u32 ptype;
inst->error = HFI_ERR_NONE;
@@ -44,86 +45,118 @@ static void event_seq_changed(struct venus_core *core, struct venus_inst *inst, break; default: inst->error = HFI_ERR_SESSION_INVALID_PARAMETER; - goto done; + inst->ops->event_notify(inst, EVT_SYS_EVENT_CHANGE, &event); + return; }
event.event_type = pkt->event_data1;
num_properties_changed = pkt->event_data2; - if (!num_properties_changed) { - inst->error = HFI_ERR_SESSION_INSUFFICIENT_RESOURCES; - goto done; - } + if (!num_properties_changed) + goto error;
data_ptr = (u8 *)&pkt->ext_event_data[0]; + rem_bytes = pkt->shdr.hdr.size - sizeof(*pkt); + do { + if (rem_bytes < sizeof(u32)) + goto error; ptype = *((u32 *)data_ptr); + + data_ptr += sizeof(u32); + rem_bytes -= sizeof(u32); + switch (ptype) { case HFI_PROPERTY_PARAM_FRAME_SIZE: - data_ptr += sizeof(u32); + if (rem_bytes < sizeof(struct hfi_framesize)) + goto error; + frame_sz = (struct hfi_framesize *)data_ptr; event.width = frame_sz->width; event.height = frame_sz->height; - data_ptr += sizeof(*frame_sz); + size_read = sizeof(struct hfi_framesize); break; case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT: - data_ptr += sizeof(u32); + if (rem_bytes < sizeof(struct hfi_profile_level)) + goto error; + profile_level = (struct hfi_profile_level *)data_ptr; event.profile = profile_level->profile; event.level = profile_level->level; - data_ptr += sizeof(*profile_level); + size_read = sizeof(struct hfi_profile_level); break; case HFI_PROPERTY_PARAM_VDEC_PIXEL_BITDEPTH: - data_ptr += sizeof(u32); + if (rem_bytes < sizeof(struct hfi_bit_depth)) + goto error; + pixel_depth = (struct hfi_bit_depth *)data_ptr; event.bit_depth = pixel_depth->bit_depth; - data_ptr += sizeof(*pixel_depth); + size_read = sizeof(struct hfi_bit_depth); break; case HFI_PROPERTY_PARAM_VDEC_PIC_STRUCT: - data_ptr += sizeof(u32); + if (rem_bytes < sizeof(struct hfi_pic_struct)) + goto error; + pic_struct = (struct hfi_pic_struct *)data_ptr; event.pic_struct = pic_struct->progressive_only; - data_ptr += sizeof(*pic_struct); + size_read = sizeof(struct hfi_pic_struct); break; case HFI_PROPERTY_PARAM_VDEC_COLOUR_SPACE: - data_ptr += sizeof(u32); + if (rem_bytes < sizeof(struct hfi_colour_space)) + goto error; + colour_info = (struct hfi_colour_space *)data_ptr; event.colour_space = colour_info->colour_space; - data_ptr += sizeof(*colour_info); + size_read = sizeof(struct hfi_colour_space); break; case HFI_PROPERTY_CONFIG_VDEC_ENTROPY: - data_ptr += sizeof(u32); + if (rem_bytes < sizeof(u32)) + goto error; + event.entropy_mode = *(u32 *)data_ptr; - data_ptr += sizeof(u32); + size_read = sizeof(u32); break; case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS: - data_ptr += sizeof(u32); + if (rem_bytes < sizeof(struct hfi_buffer_requirements)) + goto error; + bufreq = (struct hfi_buffer_requirements *)data_ptr; event.buf_count = hfi_bufreq_get_count_min(bufreq, ver); - data_ptr += sizeof(*bufreq); + size_read = sizeof(struct hfi_buffer_requirements); break; case HFI_INDEX_EXTRADATA_INPUT_CROP: - data_ptr += sizeof(u32); + if (rem_bytes < sizeof(struct hfi_extradata_input_crop)) + goto error; + crop = (struct hfi_extradata_input_crop *)data_ptr; event.input_crop.left = crop->left; event.input_crop.top = crop->top; event.input_crop.width = crop->width; event.input_crop.height = crop->height; - data_ptr += sizeof(*crop); + size_read = sizeof(struct hfi_extradata_input_crop); break; case HFI_PROPERTY_PARAM_VDEC_DPB_COUNTS: - data_ptr += sizeof(u32); + if (rem_bytes < sizeof(struct hfi_dpb_counts)) + goto error; + dpb_count = (struct hfi_dpb_counts *)data_ptr; event.buf_count = dpb_count->fw_min_cnt; - data_ptr += sizeof(*dpb_count); + size_read = sizeof(struct hfi_dpb_counts); break; default: + size_read = 0; break; } + data_ptr += size_read; + rem_bytes -= size_read; num_properties_changed--; } while (num_properties_changed > 0);
-done: + inst->ops->event_notify(inst, EVT_SYS_EVENT_CHANGE, &event); + return; + +error: + inst->error = HFI_ERR_SESSION_INSUFFICIENT_RESOURCES; inst->ops->event_notify(inst, EVT_SYS_EVENT_CHANGE, &event); }
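The event_seq_changed() rework above replaces an unchecked pointer walk over firmware-provided property data with a bounds-checked loop: rem_bytes starts as the payload size taken from the packet header, both the property type word and each property body are validated against it before being dereferenced, and any shortfall ends in the error path with HFI_ERR_SESSION_INSUFFICIENT_RESOURCES. Stripped to its skeleton, the pattern looks like this (illustrative helper names, not the driver's):

    u8  *p   = payload;              /* bytes received from firmware */
    u32  rem = payload_size;
    u32  type, body;

    while (nr_properties--) {
            if (rem < sizeof(u32))           /* room for the type word? */
                    goto error;
            type = *(u32 *)p;
            p   += sizeof(u32);
            rem -= sizeof(u32);

            body = body_size(type);          /* 0 for unrecognized types */
            if (rem < body)                  /* room for the body? */
                    goto error;
            /* ... copy the fields of interest out of p ... */
            p   += body;
            rem -= body;
    }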
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c index ab93757fff4b..8e2115279601 100644 --- a/drivers/media/platform/qcom/venus/hfi_venus.c +++ b/drivers/media/platform/qcom/venus/hfi_venus.c @@ -239,6 +239,7 @@ static int venus_write_queue(struct venus_hfi_device *hdev, static int venus_read_queue(struct venus_hfi_device *hdev, struct iface_queue *queue, void *pkt, u32 *tx_req) { + struct hfi_pkt_hdr *pkt_hdr = NULL; struct hfi_queue_header *qhdr; u32 dwords, new_rd_idx; u32 rd_idx, wr_idx, type, qsize; @@ -304,6 +305,9 @@ static int venus_read_queue(struct venus_hfi_device *hdev, memcpy(pkt, rd_ptr, len); memcpy(pkt + len, queue->qmem.kva, new_rd_idx << 2); } + pkt_hdr = (struct hfi_pkt_hdr *)(pkt); + if ((pkt_hdr->size >> 2) != dwords) + return -EINVAL; } else { /* bad packet received, dropping */ new_rd_idx = qhdr->write_idx; @@ -1689,6 +1693,7 @@ void venus_hfi_destroy(struct venus_core *core) venus_interface_queues_release(hdev); mutex_destroy(&hdev->lock); kfree(hdev); + disable_irq(core->irq); core->ops = NULL; }
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c index 884ee6e9d4bd..8be056210f1d 100644 --- a/drivers/media/platform/qcom/venus/vdec.c +++ b/drivers/media/platform/qcom/venus/vdec.c @@ -481,11 +481,10 @@ static int vdec_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a) us_per_frame = timeperframe->numerator * (u64)USEC_PER_SEC; do_div(us_per_frame, timeperframe->denominator);
- if (!us_per_frame) - return -EINVAL; - + us_per_frame = clamp(us_per_frame, 1, USEC_PER_SEC); fps = (u64)USEC_PER_SEC; do_div(fps, us_per_frame); + fps = min(VENUS_MAX_FPS, fps);
inst->fps = fps; inst->timeperframe = *timeperframe; diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c index 44b13696cf82..dd3840f7bb7b 100644 --- a/drivers/media/platform/qcom/venus/venc.c +++ b/drivers/media/platform/qcom/venus/venc.c @@ -411,11 +411,10 @@ static int venc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a) us_per_frame = timeperframe->numerator * (u64)USEC_PER_SEC; do_div(us_per_frame, timeperframe->denominator);
- if (!us_per_frame) - return -EINVAL; - + us_per_frame = clamp(us_per_frame, 1, USEC_PER_SEC); fps = (u64)USEC_PER_SEC; do_div(fps, us_per_frame); + fps = min(VENUS_MAX_FPS, fps);
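Both vdec_s_parm() and venc_s_parm() above stop rejecting a zero frame interval and instead clamp us_per_frame to [1, USEC_PER_SEC] and cap the resulting rate at VENUS_MAX_FPS. Worked through the arithmetic:

    /* timeperframe = 0/30 (previously -EINVAL):
     *   us_per_frame = 0 * 1000000 / 30 = 0   -> clamped to 1
     *   fps          = 1000000 / 1            -> capped to VENUS_MAX_FPS (240)
     * timeperframe = 1/60:
     *   us_per_frame = 16666, fps = 60        -> unchanged by the cap */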
inst->timeperframe = *timeperframe; inst->fps = fps; diff --git a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c index f97527670783..df5b7dadb1b4 100644 --- a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c +++ b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c @@ -17,7 +17,6 @@
#define RK3066_ACLK_MAX_FREQ (300 * 1000 * 1000) #define RK3288_ACLK_MAX_FREQ (400 * 1000 * 1000) -#define RK3588_ACLK_MAX_FREQ (300 * 1000 * 1000)
#define ROCKCHIP_VPU981_MIN_SIZE 64
@@ -441,13 +440,6 @@ static int rk3066_vpu_hw_init(struct hantro_dev *vpu) return 0; }
-static int rk3588_vpu981_hw_init(struct hantro_dev *vpu) -{ - /* Bump ACLKs to max. possible freq. to improve performance. */ - clk_set_rate(vpu->clocks[0].clk, RK3588_ACLK_MAX_FREQ); - return 0; -} - static int rockchip_vpu_hw_init(struct hantro_dev *vpu) { /* Bump ACLK to max. possible freq. to improve performance. */ @@ -808,7 +800,6 @@ const struct hantro_variant rk3588_vpu981_variant = { .codec_ops = rk3588_vpu981_codec_ops, .irqs = rk3588_vpu981_irqs, .num_irqs = ARRAY_SIZE(rk3588_vpu981_irqs), - .init = rk3588_vpu981_hw_init, .clk_names = rk3588_vpu981_vpu_clk_names, .num_clocks = ARRAY_SIZE(rk3588_vpu981_vpu_clk_names) }; diff --git a/drivers/media/test-drivers/vivid/vivid-ctrls.c b/drivers/media/test-drivers/vivid/vivid-ctrls.c index f2b20e25a7a4..5ca385a6a136 100644 --- a/drivers/media/test-drivers/vivid/vivid-ctrls.c +++ b/drivers/media/test-drivers/vivid/vivid-ctrls.c @@ -240,7 +240,8 @@ static const struct v4l2_ctrl_config vivid_ctrl_u8_pixel_array = { .min = 0x00, .max = 0xff, .step = 1, - .dims = { 640 / PIXEL_ARRAY_DIV, 360 / PIXEL_ARRAY_DIV }, + .dims = { DIV_ROUND_UP(360, PIXEL_ARRAY_DIV), + DIV_ROUND_UP(640, PIXEL_ARRAY_DIV) }, };
static const struct v4l2_ctrl_config vivid_ctrl_s32_array = { diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c index 0ab47fb8696b..5d1f78c7604d 100644 --- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c +++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c @@ -460,8 +460,8 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls) if (keep_controls) return;
- dims[0] = roundup(dev->src_rect.width, PIXEL_ARRAY_DIV); - dims[1] = roundup(dev->src_rect.height, PIXEL_ARRAY_DIV); + dims[0] = DIV_ROUND_UP(dev->src_rect.height, PIXEL_ARRAY_DIV); + dims[1] = DIV_ROUND_UP(dev->src_rect.width, PIXEL_ARRAY_DIV); v4l2_ctrl_modify_dimensions(dev->pixel_array, dims); }
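The two vivid hunks above fix the pixel-array control dimensions in the same way. roundup(x, d) returns x rounded up to the next multiple of d, whereas DIV_ROUND_UP(x, d) returns the rounded-up quotient, which is what a one-element-per-d-pixels array needs; the static .dims initializer also now uses the same height-first ordering as vivid_update_format_cap(). For example, taking PIXEL_ARRAY_DIV to be 30 purely for illustration:

    unsigned int a = roundup(640, 30);        /* 660: next multiple of 30 */
    unsigned int b = DIV_ROUND_UP(640, 30);   /* 22: one element per 30 pixels */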
diff --git a/drivers/media/usb/gspca/vicam.c b/drivers/media/usb/gspca/vicam.c index d98343fd33fe..91e177aa8136 100644 --- a/drivers/media/usb/gspca/vicam.c +++ b/drivers/media/usb/gspca/vicam.c @@ -227,6 +227,7 @@ static int sd_init(struct gspca_dev *gspca_dev) const struct ihex_binrec *rec; const struct firmware *fw; u8 *firmware_buf; + int len;
ret = request_ihex_firmware(&fw, VICAM_FIRMWARE, &gspca_dev->dev->dev); @@ -241,9 +242,14 @@ static int sd_init(struct gspca_dev *gspca_dev) goto exit; } for (rec = (void *)fw->data; rec; rec = ihex_next_binrec(rec)) { - memcpy(firmware_buf, rec->data, be16_to_cpu(rec->len)); + len = be16_to_cpu(rec->len); + if (len > PAGE_SIZE) { + ret = -EINVAL; + break; + } + memcpy(firmware_buf, rec->data, len); ret = vicam_control_msg(gspca_dev, 0xff, 0, 0, firmware_buf, - be16_to_cpu(rec->len)); + len); if (ret < 0) break; } diff --git a/drivers/media/usb/hdpvr/hdpvr-i2c.c b/drivers/media/usb/hdpvr/hdpvr-i2c.c index 070559b01b01..54956a8ff15e 100644 --- a/drivers/media/usb/hdpvr/hdpvr-i2c.c +++ b/drivers/media/usb/hdpvr/hdpvr-i2c.c @@ -165,10 +165,16 @@ static const struct i2c_algorithm hdpvr_algo = { .functionality = hdpvr_functionality, };
+/* prevent invalid 0-length usb_control_msg */ +static const struct i2c_adapter_quirks hdpvr_quirks = { + .flags = I2C_AQ_NO_ZERO_LEN_READ, +}; + static const struct i2c_adapter hdpvr_i2c_adapter_template = { .name = "Hauppauge HD PVR I2C", .owner = THIS_MODULE, .algo = &hdpvr_algo, + .quirks = &hdpvr_quirks, };
static int hdpvr_activate_ir(struct hdpvr_device *dev) diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c index 7495df6b5191..f3633448e8b9 100644 --- a/drivers/media/usb/usbtv/usbtv-video.c +++ b/drivers/media/usb/usbtv/usbtv-video.c @@ -73,6 +73,10 @@ static int usbtv_configure_for_norm(struct usbtv *usbtv, v4l2_std_id norm) }
if (params) { + if (vb2_is_busy(&usbtv->vb2q) && + (usbtv->width != params->cap_width || + usbtv->height != params->cap_height)) + return -EBUSY; usbtv->width = params->cap_width; usbtv->height = params->cap_height; usbtv->n_chunks = usbtv->width * usbtv->height diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index 76f18557f37b..09753993068a 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -235,6 +235,9 @@ static int uvc_parse_format(struct uvc_device *dev, unsigned int i, n; u8 ftype;
+ if (buflen < 4) + return -EINVAL; + format->type = buffer[2]; format->index = buffer[3]; format->frames = frames; diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c index 9572fdfe74f2..a9f880eb518a 100644 --- a/drivers/media/usb/uvc/uvc_video.c +++ b/drivers/media/usb/uvc/uvc_video.c @@ -258,6 +258,15 @@ static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
ctrl->dwMaxPayloadTransferSize = bandwidth; } + + if (stream->intf->num_altsetting > 1 && + ctrl->dwMaxPayloadTransferSize > stream->maxpsize) { + dev_warn_ratelimited(&stream->intf->dev, + "UVC non compliance: the max payload transmission size (%u) exceeds the size of the ep max packet (%u). Using the max size.\n", + ctrl->dwMaxPayloadTransferSize, + stream->maxpsize); + ctrl->dwMaxPayloadTransferSize = stream->maxpsize; + } }
static size_t uvc_video_ctrl_size(struct uvc_streaming *stream) @@ -1405,12 +1414,6 @@ static void uvc_video_decode_meta(struct uvc_streaming *stream, if (!meta_buf || length == 2) return;
- if (meta_buf->length - meta_buf->bytesused < - length + sizeof(meta->ns) + sizeof(meta->sof)) { - meta_buf->error = 1; - return; - } - has_pts = mem[1] & UVC_STREAM_PTS; has_scr = mem[1] & UVC_STREAM_SCR;
@@ -1431,6 +1434,12 @@ static void uvc_video_decode_meta(struct uvc_streaming *stream, !memcmp(scr, stream->clock.last_scr, 6))) return;
+ if (meta_buf->length - meta_buf->bytesused < + length + sizeof(meta->ns) + sizeof(meta->sof)) { + meta_buf->error = 1; + return; + } + meta = (struct uvc_meta_buf *)((u8 *)meta_buf->mem + meta_buf->bytesused); local_irq_save(flags); time = uvc_video_get_time(); diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c index 3a4b15a98e02..b37507e09339 100644 --- a/drivers/media/v4l2-core/v4l2-common.c +++ b/drivers/media/v4l2-core/v4l2-common.c @@ -487,10 +487,10 @@ s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
freq = div_u64(v4l2_ctrl_g_ctrl_int64(ctrl) * mul, div);
- pr_warn("%s: Link frequency estimated using pixel rate: result might be inaccurate\n", - __func__); - pr_warn("%s: Consider implementing support for V4L2_CID_LINK_FREQ in the transmitter driver\n", - __func__); + pr_warn_once("%s: Link frequency estimated using pixel rate: result might be inaccurate\n", + __func__); + pr_warn_once("%s: Consider implementing support for V4L2_CID_LINK_FREQ in the transmitter driver\n", + __func__); }
return freq > 0 ? freq : -EINVAL; diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c index 84fbf4e06cd3..a1d3e93a4095 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls-core.c +++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c @@ -1578,7 +1578,6 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl) kvfree(hdl->buckets); hdl->buckets = NULL; hdl->cached = NULL; - hdl->error = 0; mutex_unlock(hdl->lock); mutex_destroy(&hdl->_lock); } diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c index ac71abdce1b2..e0895e979e35 100644 --- a/drivers/memstick/core/memstick.c +++ b/drivers/memstick/core/memstick.c @@ -548,7 +548,6 @@ EXPORT_SYMBOL(memstick_add_host); */ void memstick_remove_host(struct memstick_host *host) { - host->removing = 1; flush_workqueue(workqueue); mutex_lock(&host->lock); if (host->card) diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c index dec279845a75..43ec4948daa2 100644 --- a/drivers/memstick/host/rtsx_usb_ms.c +++ b/drivers/memstick/host/rtsx_usb_ms.c @@ -812,6 +812,7 @@ static int rtsx_usb_ms_drv_remove(struct platform_device *pdev) int err;
host->eject = true; + msh->removing = true; cancel_work_sync(&host->handle_req); cancel_delayed_work_sync(&host->poll_card);
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c index 87603eeaa277..2b85da0fcf27 100644 --- a/drivers/mfd/axp20x.c +++ b/drivers/mfd/axp20x.c @@ -936,7 +936,8 @@ static const struct mfd_cell axp152_cells[] = { };
static struct mfd_cell axp313a_cells[] = { - MFD_CELL_NAME("axp20x-regulator"), + /* AXP323 is sometimes paired with AXP717 as sub-PMIC */ + MFD_CELL_BASIC("axp20x-regulator", NULL, NULL, 0, 1), MFD_CELL_RES("axp313a-pek", axp313a_pek_resources), };
diff --git a/drivers/misc/cardreader/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c index f150d8769f19..f546b050cb49 100644 --- a/drivers/misc/cardreader/rtsx_usb.c +++ b/drivers/misc/cardreader/rtsx_usb.c @@ -698,6 +698,12 @@ static void rtsx_usb_disconnect(struct usb_interface *intf) }
#ifdef CONFIG_PM +static int rtsx_usb_resume_child(struct device *dev, void *data) +{ + pm_request_resume(dev); + return 0; +} + static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message) { struct rtsx_ucr *ucr = @@ -713,8 +719,10 @@ static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message) mutex_unlock(&ucr->dev_mutex);
/* Defer the autosuspend if card exists */ - if (val & (SD_CD | MS_CD)) + if (val & (SD_CD | MS_CD)) { + device_for_each_child(&intf->dev, NULL, rtsx_usb_resume_child); return -EAGAIN; + } } else { /* There is an ongoing operation*/ return -EAGAIN; @@ -724,12 +732,6 @@ static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message) return 0; }
-static int rtsx_usb_resume_child(struct device *dev, void *data) -{ - pm_request_resume(dev); - return 0; -} - static int rtsx_usb_resume(struct usb_interface *intf) { device_for_each_child(&intf->dev, NULL, rtsx_usb_resume_child); diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 2e65ce6bdec7..b94cf7393fad 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -1269,10 +1269,16 @@ static void mei_dev_bus_put(struct mei_device *bus) static void mei_cl_bus_dev_release(struct device *dev) { struct mei_cl_device *cldev = to_mei_cl_device(dev); + struct mei_device *mdev = cldev->cl->dev; + struct mei_cl *cl;
mei_cl_flush_queues(cldev->cl, NULL); mei_me_cl_put(cldev->me_cl); mei_dev_bus_put(cldev->bus); + + list_for_each_entry(cl, &mdev->file_list, link) + WARN_ON(cl == cldev->cl); + kfree(cldev->cl); kfree(cldev); } diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c index ded9b6849e35..90ea92bbdb2c 100644 --- a/drivers/mmc/host/rtsx_usb_sdmmc.c +++ b/drivers/mmc/host/rtsx_usb_sdmmc.c @@ -1032,9 +1032,7 @@ static int sd_set_power_mode(struct rtsx_usb_sdmmc *host, err = sd_power_on(host); }
- if (!err) - host->power_mode = power_mode; - + host->power_mode = power_mode; return err; }
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 82808cc373f6..c2144a3efb30 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -1564,6 +1564,7 @@ static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + struct mmc_host *mmc = host->mmc; bool done = false; u32 val = SWITCHABLE_SIGNALING_VOLTAGE; const struct sdhci_msm_offset *msm_offset = @@ -1621,6 +1622,12 @@ static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type) "%s: pwr_irq for req: (%d) timed out\n", mmc_hostname(host->mmc), req_type); } + + if ((req_type & REQ_BUS_ON) && mmc->card && !mmc->ops->get_cd(mmc)) { + sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); + host->pwr = 0; + } + pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc), __func__, req_type); } @@ -1679,6 +1686,13 @@ static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq) udelay(10); }
+ if ((irq_status & CORE_PWRCTL_BUS_ON) && mmc->card && + !mmc->ops->get_cd(mmc)) { + msm_host_writel(msm_host, CORE_PWRCTL_BUS_FAIL, host, + msm_offset->core_pwrctl_ctl); + return; + } + /* Handle BUS ON/OFF*/ if (irq_status & CORE_PWRCTL_BUS_ON) { pwr_state = REQ_BUS_ON; diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c index 11c404374d79..1dc1b9274b68 100644 --- a/drivers/mmc/host/sdhci-pci-gli.c +++ b/drivers/mmc/host/sdhci-pci-gli.c @@ -28,9 +28,6 @@ #define PCI_GLI_9750_PM_CTRL 0xFC #define PCI_GLI_9750_PM_STATE GENMASK(1, 0)
-#define PCI_GLI_9750_CORRERR_MASK 0x214 -#define PCI_GLI_9750_CORRERR_MASK_REPLAY_TIMER_TIMEOUT BIT(12) - #define SDHCI_GLI_9750_CFG2 0x848 #define SDHCI_GLI_9750_CFG2_L1DLY GENMASK(28, 24) #define GLI_9750_CFG2_L1DLY_VALUE 0x1F @@ -155,9 +152,6 @@ #define PCI_GLI_9755_PM_CTRL 0xFC #define PCI_GLI_9755_PM_STATE GENMASK(1, 0)
-#define PCI_GLI_9755_CORRERR_MASK 0x214 -#define PCI_GLI_9755_CORRERR_MASK_REPLAY_TIMER_TIMEOUT BIT(12) - #define SDHCI_GLI_9767_GM_BURST_SIZE 0x510 #define SDHCI_GLI_9767_GM_BURST_SIZE_AXI_ALWAYS_SET BIT(8)
@@ -227,6 +221,20 @@ #define GLI_MAX_TUNING_LOOP 40
/* Genesys Logic chipset */ +static void sdhci_gli_mask_replay_timer_timeout(struct pci_dev *pdev) +{ + int aer; + u32 value; + + /* mask the replay timer timeout of AER */ + aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); + if (aer) { + pci_read_config_dword(pdev, aer + PCI_ERR_COR_MASK, &value); + value |= PCI_ERR_COR_REP_TIMER; + pci_write_config_dword(pdev, aer + PCI_ERR_COR_MASK, value); + } +} + static inline void gl9750_wt_on(struct sdhci_host *host) { u32 wt_value; @@ -568,9 +576,7 @@ static void gl9750_hw_setting(struct sdhci_host *host) pci_write_config_dword(pdev, PCI_GLI_9750_PM_CTRL, value);
/* mask the replay timer timeout of AER */ - pci_read_config_dword(pdev, PCI_GLI_9750_CORRERR_MASK, &value); - value |= PCI_GLI_9750_CORRERR_MASK_REPLAY_TIMER_TIMEOUT; - pci_write_config_dword(pdev, PCI_GLI_9750_CORRERR_MASK, value); + sdhci_gli_mask_replay_timer_timeout(pdev);
gl9750_wt_off(host); } @@ -782,9 +788,7 @@ static void gl9755_hw_setting(struct sdhci_pci_slot *slot) pci_write_config_dword(pdev, PCI_GLI_9755_PM_CTRL, value);
/* mask the replay timer timeout of AER */ - pci_read_config_dword(pdev, PCI_GLI_9755_CORRERR_MASK, &value); - value |= PCI_GLI_9755_CORRERR_MASK_REPLAY_TIMER_TIMEOUT; - pci_write_config_dword(pdev, PCI_GLI_9755_CORRERR_MASK, value); + sdhci_gli_mask_replay_timer_timeout(pdev);
gl9755_wt_off(pdev); } @@ -1343,7 +1347,7 @@ static int gl9763e_add_host(struct sdhci_pci_slot *slot) return ret; }
-static void gli_set_gl9763e(struct sdhci_pci_slot *slot) +static void gl9763e_hw_setting(struct sdhci_pci_slot *slot) { struct pci_dev *pdev = slot->chip->pdev; u32 value; @@ -1372,6 +1376,9 @@ static void gli_set_gl9763e(struct sdhci_pci_slot *slot) value |= FIELD_PREP(GLI_9763E_HS400_RXDLY, GLI_9763E_HS400_RXDLY_5); pci_write_config_dword(pdev, PCIE_GLI_9763E_CLKRXDLY, value);
+ /* mask the replay timer timeout of AER */ + sdhci_gli_mask_replay_timer_timeout(pdev); + pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value); value &= ~GLI_9763E_VHS_REV; value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R); @@ -1515,7 +1522,7 @@ static int gli_probe_slot_gl9763e(struct sdhci_pci_slot *slot) gli_pcie_enable_msi(slot); host->mmc_host_ops.hs400_enhanced_strobe = gl9763e_hs400_enhanced_strobe; - gli_set_gl9763e(slot); + gl9763e_hw_setting(slot); sdhci_enable_v4_mode(host);
return 0; diff --git a/drivers/most/core.c b/drivers/most/core.c index e4412c7d25b0..5d073b3d2796 100644 --- a/drivers/most/core.c +++ b/drivers/most/core.c @@ -538,8 +538,8 @@ static struct most_channel *get_channel(char *mdev, char *mdev_ch) dev = bus_find_device_by_name(&mostbus, NULL, mdev); if (!dev) return NULL; - put_device(dev); iface = dev_get_drvdata(dev); + put_device(dev); list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) { if (!strcmp(dev_name(&c->dev), mdev_ch)) return c; diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c index 811982da3557..fe5912d31bee 100644 --- a/drivers/mtd/nand/raw/fsmc_nand.c +++ b/drivers/mtd/nand/raw/fsmc_nand.c @@ -503,6 +503,8 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
dma_dev = chan->device; dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction); + if (dma_mapping_error(dma_dev->dev, dma_addr)) + return -EINVAL;
if (direction == DMA_TO_DEVICE) { dma_src = dma_addr; diff --git a/drivers/mtd/nand/raw/renesas-nand-controller.c b/drivers/mtd/nand/raw/renesas-nand-controller.c index 589021ea9eb2..a9e79f0acbe2 100644 --- a/drivers/mtd/nand/raw/renesas-nand-controller.c +++ b/drivers/mtd/nand/raw/renesas-nand-controller.c @@ -426,6 +426,9 @@ static int rnandc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf, /* Configure DMA */ dma_addr = dma_map_single(rnandc->dev, rnandc->buf, mtd->writesize, DMA_FROM_DEVICE); + if (dma_mapping_error(rnandc->dev, dma_addr)) + return -ENOMEM; + writel(dma_addr, rnandc->regs + DMA_ADDR_LOW_REG); writel(mtd->writesize, rnandc->regs + DMA_CNT_REG); writel(DMA_TLVL_MAX, rnandc->regs + DMA_TLVL_REG); @@ -606,6 +609,9 @@ static int rnandc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf, /* Configure DMA */ dma_addr = dma_map_single(rnandc->dev, (void *)rnandc->buf, mtd->writesize, DMA_TO_DEVICE); + if (dma_mapping_error(rnandc->dev, dma_addr)) + return -ENOMEM; + writel(dma_addr, rnandc->regs + DMA_ADDR_LOW_REG); writel(mtd->writesize, rnandc->regs + DMA_CNT_REG); writel(DMA_TLVL_MAX, rnandc->regs + DMA_TLVL_REG); diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index cd21bf8f254a..ee61b2d88232 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c @@ -624,7 +624,10 @@ static int spinand_write_page(struct spinand_device *spinand, SPINAND_WRITE_INITIAL_DELAY_US, SPINAND_WRITE_POLL_DELAY_US, &status); - if (!ret && (status & STATUS_PROG_FAILED)) + if (ret) + return ret; + + if (status & STATUS_PROG_FAILED) return -EIO;
return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req); diff --git a/drivers/mtd/spi-nor/swp.c b/drivers/mtd/spi-nor/swp.c index 5ab9d5324860..a186d1fde869 100644 --- a/drivers/mtd/spi-nor/swp.c +++ b/drivers/mtd/spi-nor/swp.c @@ -50,7 +50,6 @@ static u64 spi_nor_get_min_prot_length_sr(struct spi_nor *nor) static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs, uint64_t *len) { - struct mtd_info *mtd = &nor->mtd; u64 min_prot_len; u8 mask = spi_nor_get_sr_bp_mask(nor); u8 tb_mask = spi_nor_get_sr_tb_mask(nor); @@ -71,13 +70,13 @@ static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs, min_prot_len = spi_nor_get_min_prot_length_sr(nor); *len = min_prot_len << (bp - 1);
- if (*len > mtd->size) - *len = mtd->size; + if (*len > nor->params->size) + *len = nor->params->size;
if (nor->flags & SNOR_F_HAS_SR_TB && sr & tb_mask) *ofs = 0; else - *ofs = mtd->size - *len; + *ofs = nor->params->size - *len; }
/* @@ -153,7 +152,6 @@ static bool spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, */ static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) { - struct mtd_info *mtd = &nor->mtd; u64 min_prot_len; int ret, status_old, status_new; u8 mask = spi_nor_get_sr_bp_mask(nor); @@ -178,7 +176,7 @@ static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) can_be_bottom = false;
/* If anything above us is unlocked, we can't use 'top' protection */ - if (!spi_nor_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len), + if (!spi_nor_is_locked_sr(nor, ofs + len, nor->params->size - (ofs + len), status_old)) can_be_top = false;
@@ -190,11 +188,11 @@ static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
/* lock_len: length of region that should end up locked */ if (use_top) - lock_len = mtd->size - ofs; + lock_len = nor->params->size - ofs; else lock_len = ofs + len;
- if (lock_len == mtd->size) { + if (lock_len == nor->params->size) { val = mask; } else { min_prot_len = spi_nor_get_min_prot_length_sr(nor); @@ -243,7 +241,6 @@ static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) */ static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) { - struct mtd_info *mtd = &nor->mtd; u64 min_prot_len; int ret, status_old, status_new; u8 mask = spi_nor_get_sr_bp_mask(nor); @@ -268,7 +265,7 @@ static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) can_be_top = false;
/* If anything above us is locked, we can't use 'bottom' protection */ - if (!spi_nor_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len), + if (!spi_nor_is_unlocked_sr(nor, ofs + len, nor->params->size - (ofs + len), status_old)) can_be_bottom = false;
@@ -280,7 +277,7 @@ static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
/* lock_len: length of region that should remain locked */ if (use_top) - lock_len = mtd->size - (ofs + len); + lock_len = nor->params->size - (ofs + len); else lock_len = ofs;
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index c99ffe6c683a..d02a91cefec8 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -99,13 +99,16 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker); static void ad_mux_machine(struct port *port, bool *update_slave_arr); static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port); static void ad_tx_machine(struct port *port); -static void ad_periodic_machine(struct port *port, struct bond_params *bond_params); +static void ad_periodic_machine(struct port *port); static void ad_port_selection_logic(struct port *port, bool *update_slave_arr); static void ad_agg_selection_logic(struct aggregator *aggregator, bool *update_slave_arr); static void ad_clear_agg(struct aggregator *aggregator); static void ad_initialize_agg(struct aggregator *aggregator); -static void ad_initialize_port(struct port *port, int lacp_fast); +static void ad_initialize_port(struct port *port, const struct bond_params *bond_params); +static void ad_enable_collecting(struct port *port); +static void ad_disable_distributing(struct port *port, + bool *update_slave_arr); static void ad_enable_collecting_distributing(struct port *port, bool *update_slave_arr); static void ad_disable_collecting_distributing(struct port *port, @@ -171,9 +174,38 @@ static inline int __agg_has_partner(struct aggregator *agg) return !is_zero_ether_addr(agg->partner_system.mac_addr_value); }
+/** + * __disable_distributing_port - disable the port's slave for distributing. + * Port will still be able to collect. + * @port: the port we're looking at + * + * This will disable only distributing on the port's slave. + */ +static void __disable_distributing_port(struct port *port) +{ + bond_set_slave_tx_disabled_flags(port->slave, BOND_SLAVE_NOTIFY_LATER); +} + +/** + * __enable_collecting_port - enable the port's slave for collecting, + * if it's up + * @port: the port we're looking at + * + * This will enable only collecting on the port's slave. + */ +static void __enable_collecting_port(struct port *port) +{ + struct slave *slave = port->slave; + + if (slave->link == BOND_LINK_UP && bond_slave_is_up(slave)) + bond_set_slave_rx_enabled_flags(slave, BOND_SLAVE_NOTIFY_LATER); +} + /** * __disable_port - disable the port's slave * @port: the port we're looking at + * + * This will disable both collecting and distributing on the port's slave. */ static inline void __disable_port(struct port *port) { @@ -183,6 +215,8 @@ static inline void __disable_port(struct port *port) /** * __enable_port - enable the port's slave, if it's up * @port: the port we're looking at + * + * This will enable both collecting and distributing on the port's slave. */ static inline void __enable_port(struct port *port) { @@ -193,10 +227,27 @@ static inline void __enable_port(struct port *port) }
/** - * __port_is_enabled - check if the port's slave is in active state + * __port_move_to_attached_state - check if port should transition back to attached + * state. * @port: the port we're looking at */ -static inline int __port_is_enabled(struct port *port) +static bool __port_move_to_attached_state(struct port *port) +{ + if (!(port->sm_vars & AD_PORT_SELECTED) || + (port->sm_vars & AD_PORT_STANDBY) || + !(port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) || + !(port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION)) + port->sm_mux_state = AD_MUX_ATTACHED; + + return port->sm_mux_state == AD_MUX_ATTACHED; +} + +/** + * __port_is_collecting_distributing - check if the port's slave is in the + * combined collecting/distributing state + * @port: the port we're looking at + */ +static int __port_is_collecting_distributing(struct port *port) { return bond_is_active_slave(port->slave); } @@ -942,6 +993,7 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker) */ static void ad_mux_machine(struct port *port, bool *update_slave_arr) { + struct bonding *bond = __get_bond_by_port(port); mux_states_t last_state;
/* keep current State Machine state to compare later if it was @@ -999,9 +1051,13 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr) if ((port->sm_vars & AD_PORT_SELECTED) && (port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) && !__check_agg_selection_timer(port)) { - if (port->aggregator->is_active) - port->sm_mux_state = - AD_MUX_COLLECTING_DISTRIBUTING; + if (port->aggregator->is_active) { + int state = AD_MUX_COLLECTING_DISTRIBUTING; + + if (!bond->params.coupled_control) + state = AD_MUX_COLLECTING; + port->sm_mux_state = state; + } } else if (!(port->sm_vars & AD_PORT_SELECTED) || (port->sm_vars & AD_PORT_STANDBY)) { /* if UNSELECTED or STANDBY */ @@ -1019,11 +1075,45 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr) } break; case AD_MUX_COLLECTING_DISTRIBUTING: + if (!__port_move_to_attached_state(port)) { + /* if port state hasn't changed make + * sure that a collecting distributing + * port in an active aggregator is enabled + */ + if (port->aggregator->is_active && + !__port_is_collecting_distributing(port)) { + __enable_port(port); + *update_slave_arr = true; + } + } + break; + case AD_MUX_COLLECTING: + if (!__port_move_to_attached_state(port)) { + if ((port->sm_vars & AD_PORT_SELECTED) && + (port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) && + (port->partner_oper.port_state & LACP_STATE_COLLECTING)) { + port->sm_mux_state = AD_MUX_DISTRIBUTING; + } else { + /* If port state hasn't changed, make sure that a collecting + * port is enabled for an active aggregator. + */ + struct slave *slave = port->slave; + + if (port->aggregator->is_active && + bond_is_slave_rx_disabled(slave)) { + ad_enable_collecting(port); + *update_slave_arr = true; + } + } + } + break; + case AD_MUX_DISTRIBUTING: if (!(port->sm_vars & AD_PORT_SELECTED) || (port->sm_vars & AD_PORT_STANDBY) || + !(port->partner_oper.port_state & LACP_STATE_COLLECTING) || !(port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) || !(port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION)) { - port->sm_mux_state = AD_MUX_ATTACHED; + port->sm_mux_state = AD_MUX_COLLECTING; } else { /* if port state hasn't changed make * sure that a collecting distributing @@ -1031,7 +1121,7 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr) */ if (port->aggregator && port->aggregator->is_active && - !__port_is_enabled(port)) { + !__port_is_collecting_distributing(port)) { __enable_port(port); *update_slave_arr = true; } @@ -1082,6 +1172,20 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr) update_slave_arr); port->ntt = true; break; + case AD_MUX_COLLECTING: + port->actor_oper_port_state |= LACP_STATE_COLLECTING; + port->actor_oper_port_state &= ~LACP_STATE_DISTRIBUTING; + port->actor_oper_port_state |= LACP_STATE_SYNCHRONIZATION; + ad_enable_collecting(port); + ad_disable_distributing(port, update_slave_arr); + port->ntt = true; + break; + case AD_MUX_DISTRIBUTING: + port->actor_oper_port_state |= LACP_STATE_DISTRIBUTING; + port->actor_oper_port_state |= LACP_STATE_SYNCHRONIZATION; + ad_enable_collecting_distributing(port, + update_slave_arr); + break; default: break; } @@ -1196,10 +1300,16 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) * case of EXPIRED even if LINK_DOWN didn't arrive for * the port. 
*/ - port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION; port->sm_vars &= ~AD_PORT_MATCHED; + /* Based on IEEE 802.1AX-2014, Figure 6-18 - Receive + * machine state diagram, the state should be + * Partner_Oper_Port_State.Synchronization = FALSE; + * Partner_Oper_Port_State.LACP_Timeout = Short Timeout; + * start current_while_timer(Short Timeout); + * Actor_Oper_Port_State.Expired = TRUE; + */ + port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION; port->partner_oper.port_state |= LACP_STATE_LACP_TIMEOUT; - port->partner_oper.port_state |= LACP_STATE_LACP_ACTIVITY; port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT)); port->actor_oper_port_state |= LACP_STATE_EXPIRED; port->sm_vars |= AD_PORT_CHURNED; @@ -1305,11 +1415,10 @@ static void ad_tx_machine(struct port *port) /** * ad_periodic_machine - handle a port's periodic state machine * @port: the port we're looking at - * @bond_params: bond parameters we will use * * Turn ntt flag on periodically to perform periodic transmission of lacpdu's. */ -static void ad_periodic_machine(struct port *port, struct bond_params *bond_params) +static void ad_periodic_machine(struct port *port) { periodic_states_t last_state;
@@ -1318,8 +1427,7 @@ static void ad_periodic_machine(struct port *port, struct bond_params *bond_para
/* check if port was reinitialized */ if (((port->sm_vars & AD_PORT_BEGIN) || !(port->sm_vars & AD_PORT_LACP_ENABLED) || !port->is_enabled) || - (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY)) || - !bond_params->lacp_active) { + (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY))) { port->sm_periodic_state = AD_NO_PERIODIC; } /* check if state machine should change state */ @@ -1843,16 +1951,16 @@ static void ad_initialize_agg(struct aggregator *aggregator) /** * ad_initialize_port - initialize a given port's parameters * @port: the port we're looking at - * @lacp_fast: boolean. whether fast periodic should be used + * @bond_params: bond parameters we will use */ -static void ad_initialize_port(struct port *port, int lacp_fast) +static void ad_initialize_port(struct port *port, const struct bond_params *bond_params) { static const struct port_params tmpl = { .system_priority = 0xffff, .key = 1, .port_number = 1, .port_priority = 0xff, - .port_state = 1, + .port_state = 0, }; static const struct lacpdu lacpdu = { .subtype = 0x01, @@ -1870,12 +1978,14 @@ static void ad_initialize_port(struct port *port, int lacp_fast) port->actor_port_priority = 0xff; port->actor_port_aggregator_identifier = 0; port->ntt = false; - port->actor_admin_port_state = LACP_STATE_AGGREGATION | - LACP_STATE_LACP_ACTIVITY; - port->actor_oper_port_state = LACP_STATE_AGGREGATION | - LACP_STATE_LACP_ACTIVITY; + port->actor_admin_port_state = LACP_STATE_AGGREGATION; + port->actor_oper_port_state = LACP_STATE_AGGREGATION; + if (bond_params->lacp_active) { + port->actor_admin_port_state |= LACP_STATE_LACP_ACTIVITY; + port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY; + }
- if (lacp_fast) + if (bond_params->lacp_fast) port->actor_oper_port_state |= LACP_STATE_LACP_TIMEOUT;
memcpy(&port->partner_admin, &tmpl, sizeof(tmpl)); @@ -1906,6 +2016,45 @@ static void ad_initialize_port(struct port *port, int lacp_fast) } }
+/** + * ad_enable_collecting - enable a port's receive + * @port: the port we're looking at + * + * Enable @port if it's in an active aggregator + */ +static void ad_enable_collecting(struct port *port) +{ + if (port->aggregator->is_active) { + struct slave *slave = port->slave; + + slave_dbg(slave->bond->dev, slave->dev, + "Enabling collecting on port %d (LAG %d)\n", + port->actor_port_number, + port->aggregator->aggregator_identifier); + __enable_collecting_port(port); + } +} + +/** + * ad_disable_distributing - disable a port's transmit + * @port: the port we're looking at + * @update_slave_arr: Does slave array need update? + */ +static void ad_disable_distributing(struct port *port, bool *update_slave_arr) +{ + if (port->aggregator && + !MAC_ADDRESS_EQUAL(&port->aggregator->partner_system, + &(null_mac_addr))) { + slave_dbg(port->slave->bond->dev, port->slave->dev, + "Disabling distributing on port %d (LAG %d)\n", + port->actor_port_number, + port->aggregator->aggregator_identifier); + __disable_distributing_port(port); + /* Slave array needs an update */ + *update_slave_arr = true; + } +} + /** * ad_enable_collecting_distributing - enable a port's transmit/receive * @port: the port we're looking at @@ -2052,7 +2201,7 @@ void bond_3ad_bind_slave(struct slave *slave) /* port initialization */ port = &(SLAVE_AD_INFO(slave)->port);
- ad_initialize_port(port, bond->params.lacp_fast); + ad_initialize_port(port, &bond->params);
port->slave = slave; port->actor_port_number = SLAVE_AD_INFO(slave)->id; @@ -2364,7 +2513,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work) }
ad_rx_machine(NULL, port); - ad_periodic_machine(port, &bond->params); + ad_periodic_machine(port); ad_port_selection_logic(port, &update_slave_arr); ad_mux_machine(port, &update_slave_arr); ad_tx_machine(port); @@ -2734,6 +2883,31 @@ void bond_3ad_update_lacp_rate(struct bonding *bond) spin_unlock_bh(&bond->mode_lock); }
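The AD_MUX_COLLECTING and AD_MUX_DISTRIBUTING cases added above split the old coupled collecting/distributing state so a port can collect before it distributes when coupled_control is off. A small, deliberately simplified sketch of that decoupled progression (illustrative only; the real mux machine also checks standby, aggregator activity and the actor's own sync bit):

#include <stdbool.h>
#include <stdio.h>

enum mux_state { MUX_ATTACHED, MUX_COLLECTING, MUX_DISTRIBUTING };

/* Simplified from the AD_MUX_COLLECTING/AD_MUX_DISTRIBUTING cases above. */
static enum mux_state next_state(enum mux_state cur, bool selected,
                                 bool partner_sync, bool partner_collecting)
{
        if (!selected || !partner_sync)
                return MUX_ATTACHED;

        switch (cur) {
        case MUX_ATTACHED:
                return MUX_COLLECTING;
        case MUX_COLLECTING:
        case MUX_DISTRIBUTING:
                return partner_collecting ? MUX_DISTRIBUTING : MUX_COLLECTING;
        }
        return cur;
}

int main(void)
{
        enum mux_state s = MUX_ATTACHED;

        s = next_state(s, true, true, false);   /* -> COLLECTING */
        s = next_state(s, true, true, true);    /* -> DISTRIBUTING */
        s = next_state(s, true, true, false);   /* back to COLLECTING */
        printf("final state: %d\n", s);
        return 0;
}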
+/** + * bond_3ad_update_lacp_active - change the lacp active + * @bond: bonding struct + * + * Update actor_oper_port_state when lacp_active is modified. + */ +void bond_3ad_update_lacp_active(struct bonding *bond) +{ + struct port *port = NULL; + struct list_head *iter; + struct slave *slave; + int lacp_active; + + lacp_active = bond->params.lacp_active; + spin_lock_bh(&bond->mode_lock); + bond_for_each_slave(bond, slave, iter) { + port = &(SLAVE_AD_INFO(slave)->port); + if (lacp_active) + port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY; + else + port->actor_oper_port_state &= ~LACP_STATE_LACP_ACTIVITY; + } + spin_unlock_bh(&bond->mode_lock); +} + size_t bond_3ad_stats_size(void) { return nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_RX */ diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 85ab69257162..cd5691ed9f17 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -6399,6 +6399,7 @@ static int __init bond_check_params(struct bond_params *params) params->ad_actor_sys_prio = ad_actor_sys_prio; eth_zero_addr(params->ad_actor_system); params->ad_user_port_key = ad_user_port_key; + params->coupled_control = 1; if (packets_per_slave > 0) { params->reciprocal_packets_per_slave = reciprocal_value(packets_per_slave); diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index 27cbe148f0db..aebc814ad495 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -122,6 +122,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = { [IFLA_BOND_PEER_NOTIF_DELAY] = NLA_POLICY_FULL_RANGE(NLA_U32, &delay_range), [IFLA_BOND_MISSED_MAX] = { .type = NLA_U8 }, [IFLA_BOND_NS_IP6_TARGET] = { .type = NLA_NESTED }, + [IFLA_BOND_COUPLED_CONTROL] = { .type = NLA_U8 }, };
static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = { @@ -549,6 +550,16 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[], return err; }
+ if (data[IFLA_BOND_COUPLED_CONTROL]) { + int coupled_control = nla_get_u8(data[IFLA_BOND_COUPLED_CONTROL]); + + bond_opt_initval(&newval, coupled_control); + err = __bond_opt_set(bond, BOND_OPT_COUPLED_CONTROL, &newval, + data[IFLA_BOND_COUPLED_CONTROL], extack); + if (err) + return err; + } + return 0; }
@@ -615,6 +626,7 @@ static size_t bond_get_size(const struct net_device *bond_dev) /* IFLA_BOND_NS_IP6_TARGET */ nla_total_size(sizeof(struct nlattr)) + nla_total_size(sizeof(struct in6_addr)) * BOND_MAX_NS_TARGETS + + nla_total_size(sizeof(u8)) + /* IFLA_BOND_COUPLED_CONTROL */ 0; }
@@ -774,6 +786,10 @@ static int bond_fill_info(struct sk_buff *skb, bond->params.missed_max)) goto nla_put_failure;
+ if (nla_put_u8(skb, IFLA_BOND_COUPLED_CONTROL, + bond->params.coupled_control)) + goto nla_put_failure; + if (BOND_MODE(bond) == BOND_MODE_8023AD) { struct ad_info info;
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 6d003c0ef669..8291803e4f00 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -85,7 +85,8 @@ static int bond_option_ad_user_port_key_set(struct bonding *bond, const struct bond_opt_value *newval); static int bond_option_missed_max_set(struct bonding *bond, const struct bond_opt_value *newval); - +static int bond_option_coupled_control_set(struct bonding *bond, + const struct bond_opt_value *newval);
static const struct bond_opt_value bond_mode_tbl[] = { { "balance-rr", BOND_MODE_ROUNDROBIN, BOND_VALFLAG_DEFAULT}, @@ -233,6 +234,12 @@ static const struct bond_opt_value bond_missed_max_tbl[] = { { NULL, -1, 0}, };
+static const struct bond_opt_value bond_coupled_control_tbl[] = { + { "on", 1, BOND_VALFLAG_DEFAULT}, + { "off", 0, 0}, + { NULL, -1, 0}, +}; + static const struct bond_option bond_opts[BOND_OPT_LAST] = { [BOND_OPT_MODE] = { .id = BOND_OPT_MODE, @@ -497,6 +504,15 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = { .desc = "Delay between each peer notification on failover event, in milliseconds", .values = bond_peer_notif_delay_tbl, .set = bond_option_peer_notif_delay_set + }, + [BOND_OPT_COUPLED_CONTROL] = { + .id = BOND_OPT_COUPLED_CONTROL, + .name = "coupled_control", + .desc = "Opt into using coupled control MUX for LACP states", + .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)), + .flags = BOND_OPTFLAG_IFDOWN, + .values = bond_coupled_control_tbl, + .set = bond_option_coupled_control_set, } };
@@ -1618,6 +1634,7 @@ static int bond_option_lacp_active_set(struct bonding *bond, netdev_dbg(bond->dev, "Setting LACP active to %s (%llu)\n", newval->string, newval->value); bond->params.lacp_active = newval->value; + bond_3ad_update_lacp_active(bond);
return 0; } @@ -1811,3 +1828,13 @@ static int bond_option_ad_user_port_key_set(struct bonding *bond, bond->params.ad_user_port_key = newval->value; return 0; } + +static int bond_option_coupled_control_set(struct bonding *bond, + const struct bond_opt_value *newval) +{ + netdev_info(bond->dev, "Setting coupled_control to %s (%llu)\n", + newval->string, newval->value); + + bond->params.coupled_control = newval->value; + return 0; +} diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 004d2c988ff0..b00bac468677 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -339,18 +339,23 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
- /* Include IMP port in dumb forwarding mode - */ - b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt); - mgmt |= B53_MII_DUMB_FWDG_EN; - b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt); - - /* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether - * frames should be flooded or not. - */ - b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt); - mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN; - b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt); + if (!is5325(dev)) { + /* Include IMP port in dumb forwarding mode */ + b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt); + mgmt |= B53_MII_DUMB_FWDG_EN; + b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt); + + /* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether + * frames should be flooded or not. + */ + b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt); + mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN; + b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt); + } else { + b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt); + mgmt |= B53_IP_MCAST_25; + b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt); + } }
static void b53_enable_vlan(struct b53_device *dev, int port, bool enable, @@ -507,6 +512,10 @@ void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port) unsigned int i; u16 pvlan;
+ /* BCM5325 CPU port is at 8 */ + if ((is5325(dev) || is5365(dev)) && cpu_port == B53_CPU_PORT_25) + cpu_port = B53_CPU_PORT; + /* Enable the IMP port to be in the same VLAN as the other ports * on a per-port basis such that we only have Port i and IMP in * the same VLAN. @@ -557,6 +566,9 @@ static void b53_port_set_learning(struct b53_device *dev, int port, { u16 reg;
+ if (is5325(dev)) + return; + b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, &reg); if (learning) reg &= ~BIT(port); @@ -1163,6 +1175,8 @@ static void b53_force_link(struct b53_device *dev, int port, int link) if (port == dev->imp_port) { off = B53_PORT_OVERRIDE_CTRL; val = PORT_OVERRIDE_EN; + } else if (is5325(dev)) { + return; } else { off = B53_GMII_PORT_OVERRIDE_CTRL(port); val = GMII_PO_EN; @@ -1187,6 +1201,8 @@ static void b53_force_port_config(struct b53_device *dev, int port, if (port == dev->imp_port) { off = B53_PORT_OVERRIDE_CTRL; val = PORT_OVERRIDE_EN; + } else if (is5325(dev)) { + return; } else { off = B53_GMII_PORT_OVERRIDE_CTRL(port); val = GMII_PO_EN; @@ -1217,10 +1233,19 @@ static void b53_force_port_config(struct b53_device *dev, int port, return; }
- if (rx_pause) - reg |= PORT_OVERRIDE_RX_FLOW; - if (tx_pause) - reg |= PORT_OVERRIDE_TX_FLOW; + if (rx_pause) { + if (is5325(dev)) + reg |= PORT_OVERRIDE_LP_FLOW_25; + else + reg |= PORT_OVERRIDE_RX_FLOW; + } + + if (tx_pause) { + if (is5325(dev)) + reg |= PORT_OVERRIDE_LP_FLOW_25; + else + reg |= PORT_OVERRIDE_TX_FLOW; + }
b53_write8(dev, B53_CTRL_PAGE, off, reg); } @@ -2045,7 +2070,13 @@ int b53_br_flags_pre(struct dsa_switch *ds, int port, struct switchdev_brport_flags flags, struct netlink_ext_ack *extack) { - if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_LEARNING)) + struct b53_device *dev = ds->priv; + unsigned long mask = (BR_FLOOD | BR_MCAST_FLOOD); + + if (!is5325(dev)) + mask |= BR_LEARNING; + + if (flags.mask & ~mask) return -EINVAL;
return 0; diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h index bfbcb66bef66..3179fe58de6b 100644 --- a/drivers/net/dsa/b53/b53_regs.h +++ b/drivers/net/dsa/b53/b53_regs.h @@ -92,6 +92,7 @@ #define PORT_OVERRIDE_SPEED_10M (0 << PORT_OVERRIDE_SPEED_S) #define PORT_OVERRIDE_SPEED_100M (1 << PORT_OVERRIDE_SPEED_S) #define PORT_OVERRIDE_SPEED_1000M (2 << PORT_OVERRIDE_SPEED_S) +#define PORT_OVERRIDE_LP_FLOW_25 BIT(3) /* BCM5325 only */ #define PORT_OVERRIDE_RV_MII_25 BIT(4) /* BCM5325 only */ #define PORT_OVERRIDE_RX_FLOW BIT(4) #define PORT_OVERRIDE_TX_FLOW BIT(5) @@ -103,6 +104,7 @@
/* IP Multicast control (8 bit) */ #define B53_IP_MULTICAST_CTRL 0x21 +#define B53_IP_MCAST_25 BIT(0) #define B53_IPMC_FWD_EN BIT(1) #define B53_UC_FWD_EN BIT(6) #define B53_MC_FWD_EN BIT(7) diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index 3d9220f9c9fe..294dbe2c3797 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@ -2459,6 +2459,10 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) skb->data, skb_headlen(skb), DMA_TO_DEVICE); + if (dma_mapping_error(&adapter->pdev->dev, + dma_addr)) + return -ENOMEM; + desc[frag].addr_lo = lower_32_bits(dma_addr); desc[frag].addr_hi = upper_32_bits(dma_addr); frag++; @@ -2468,6 +2472,10 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) skb->data, skb_headlen(skb) / 2, DMA_TO_DEVICE); + if (dma_mapping_error(&adapter->pdev->dev, + dma_addr)) + return -ENOMEM; + desc[frag].addr_lo = lower_32_bits(dma_addr); desc[frag].addr_hi = upper_32_bits(dma_addr); frag++; @@ -2478,6 +2486,10 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) skb_headlen(skb) / 2, skb_headlen(skb) / 2, DMA_TO_DEVICE); + if (dma_mapping_error(&adapter->pdev->dev, + dma_addr)) + goto unmap_first_out; + desc[frag].addr_lo = lower_32_bits(dma_addr); desc[frag].addr_hi = upper_32_bits(dma_addr); frag++; @@ -2489,6 +2501,9 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) 0, desc[frag].len_vlan, DMA_TO_DEVICE); + if (dma_mapping_error(&adapter->pdev->dev, dma_addr)) + goto unmap_out; + desc[frag].addr_lo = lower_32_bits(dma_addr); desc[frag].addr_hi = upper_32_bits(dma_addr); frag++; @@ -2578,6 +2593,27 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) &adapter->regs->global.watchdog_timer); } return 0; + +unmap_out: + // Unmap the body of the packet with map_page + while (--i) { + frag--; + dma_addr = desc[frag].addr_lo; + dma_addr |= (u64)desc[frag].addr_hi << 32; + dma_unmap_page(&adapter->pdev->dev, dma_addr, + desc[frag].len_vlan, DMA_TO_DEVICE); + } + +unmap_first_out: + // Unmap the header with map_single + while (frag--) { + dma_addr = desc[frag].addr_lo; + dma_addr |= (u64)desc[frag].addr_hi << 32; + dma_unmap_single(&adapter->pdev->dev, dma_addr, + desc[frag].len_vlan, DMA_TO_DEVICE); + } + + return -ENOMEM; }
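The et131x hunk above adds dma_mapping_error() checks after each mapping plus an unwind path. The general rule: a dma_addr_t returned by dma_map_single()/dma_map_page() can only be validated with dma_mapping_error(), and every successful mapping must be unmapped if a later step fails. A condensed sketch of that pattern for a hypothetical driver (drv_map_head/drv_unmap_head are made-up names; the dma_* calls are the real kernel API):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Hypothetical helper: map an skb head for TX and report failure cleanly. */
static int drv_map_head(struct device *dev, struct sk_buff *skb,
                        dma_addr_t *out)
{
        dma_addr_t dma = dma_map_single(dev, skb->data, skb_headlen(skb),
                                        DMA_TO_DEVICE);

        /* The only valid failure test for a mapping is dma_mapping_error(). */
        if (dma_mapping_error(dev, dma))
                return -ENOMEM;

        *out = dma;
        return 0;
}

/* Hypothetical helper: undo drv_map_head() on a later error path. */
static void drv_unmap_head(struct device *dev, dma_addr_t dma, size_t len)
{
        dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
}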
static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index dbd284660135..7f616abd3db2 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -113,6 +113,8 @@ struct aq_stats_s { #define AQ_HW_POWER_STATE_D0 0U #define AQ_HW_POWER_STATE_D3 3U
+#define AQ_FW_WAKE_ON_LINK_RTPM BIT(10) + #define AQ_HW_FLAG_STARTED 0x00000004U #define AQ_HW_FLAG_STOPPING 0x00000008U #define AQ_HW_FLAG_RESETTING 0x00000010U diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c index 52e2070a4a2f..7370e3f76b62 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c @@ -462,6 +462,44 @@ static int aq_a2_fw_get_mac_temp(struct aq_hw_s *self, int *temp) return aq_a2_fw_get_phy_temp(self, temp); }
+static int aq_a2_fw_set_wol_params(struct aq_hw_s *self, const u8 *mac, u32 wol) +{ + struct mac_address_aligned_s mac_address; + struct link_control_s link_control; + struct wake_on_lan_s wake_on_lan; + + memcpy(mac_address.aligned.mac_address, mac, ETH_ALEN); + hw_atl2_shared_buffer_write(self, mac_address, mac_address); + + memset(&wake_on_lan, 0, sizeof(wake_on_lan)); + + if (wol & WAKE_MAGIC) + wake_on_lan.wake_on_magic_packet = 1U; + + if (wol & (WAKE_PHY | AQ_FW_WAKE_ON_LINK_RTPM)) + wake_on_lan.wake_on_link_up = 1U; + + hw_atl2_shared_buffer_write(self, sleep_proxy, wake_on_lan); + + hw_atl2_shared_buffer_get(self, link_control, link_control); + link_control.mode = AQ_HOST_MODE_SLEEP_PROXY; + hw_atl2_shared_buffer_write(self, link_control, link_control); + + return hw_atl2_shared_buffer_finish_ack(self); +} + +static int aq_a2_fw_set_power(struct aq_hw_s *self, unsigned int power_state, + const u8 *mac) +{ + u32 wol = self->aq_nic_cfg->wol; + int err = 0; + + if (wol) + err = aq_a2_fw_set_wol_params(self, mac, wol); + + return err; +} + static int aq_a2_fw_set_eee_rate(struct aq_hw_s *self, u32 speed) { struct link_options_s link_options; @@ -605,6 +643,7 @@ const struct aq_fw_ops aq_a2_fw_ops = { .set_state = aq_a2_fw_set_state, .update_link_status = aq_a2_fw_update_link_status, .update_stats = aq_a2_fw_update_stats, + .set_power = aq_a2_fw_set_power, .get_mac_temp = aq_a2_fw_get_mac_temp, .get_phy_temp = aq_a2_fw_get_phy_temp, .set_eee_rate = aq_a2_fw_set_eee_rate, diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index 009e0b3066fa..baf12ae0b8c4 100644 --- a/drivers/net/ethernet/atheros/ag71xx.c +++ b/drivers/net/ethernet/atheros/ag71xx.c @@ -1234,6 +1234,11 @@ static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf, buf->rx.rx_buf = data; buf->rx.dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size, DMA_FROM_DEVICE); + if (dma_mapping_error(&ag->pdev->dev, buf->rx.dma_addr)) { + skb_free_frag(data); + buf->rx.rx_buf = NULL; + return false; + } desc->data = (u32)buf->rx.dma_addr + offset; return true; } @@ -1532,6 +1537,10 @@ static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(&ag->pdev->dev, dma_addr)) { + netif_dbg(ag, tx_err, ndev, "DMA mapping error\n"); + goto err_drop; + }
i = ring->curr & ring_mask; desc = ag71xx_ring_desc(ring, i); diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index a317feb8decb..087d4c2b3efd 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -1427,9 +1427,9 @@ static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl, { struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; struct bgx *bgx = context; - char bgx_sel[5]; + char bgx_sel[7];
- snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id); + snprintf(bgx_sel, sizeof(bgx_sel), "BGX%d", bgx->bgx_id); if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) { pr_warn("Invalid link device\n"); return AE_OK; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 6bc0fde95f9d..0fda17bc8e23 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1465,10 +1465,10 @@ static void be_tx_timeout(struct net_device *netdev, unsigned int txqueue) ntohs(tcphdr->source)); dev_info(dev, "TCP dest port %d\n", ntohs(tcphdr->dest)); - dev_info(dev, "TCP sequence num %d\n", - ntohs(tcphdr->seq)); - dev_info(dev, "TCP ack_seq %d\n", - ntohs(tcphdr->ack_seq)); + dev_info(dev, "TCP sequence num %u\n", + ntohl(tcphdr->seq)); + dev_info(dev, "TCP ack_seq %u\n", + ntohl(tcphdr->ack_seq)); } else if (ip_hdr(skb)->protocol == IPPROTO_UDP) { udphdr = udp_hdr(skb); diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index e7bf70ac9a4c..6b7e1bb5c62d 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -28,7 +28,6 @@ #include <linux/percpu.h> #include <linux/dma-mapping.h> #include <linux/sort.h> -#include <linux/phy_fixed.h> #include <linux/bpf.h> #include <linux/bpf_trace.h> #include <soc/fsl/bman.h> @@ -3141,7 +3140,6 @@ static const struct net_device_ops dpaa_ops = { .ndo_stop = dpaa_eth_stop, .ndo_tx_timeout = dpaa_tx_timeout, .ndo_get_stats64 = dpaa_get_stats64, - .ndo_change_carrier = fixed_phy_change_carrier, .ndo_set_mac_address = dpaa_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = dpaa_set_rx_mode, diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 3f8cd4a7d845..20d73a4ed22c 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -415,8 +415,10 @@ static int dpaa_get_ts_info(struct net_device *net_dev, of_node_put(ptp_node); }
- if (ptp_dev) + if (ptp_dev) { ptp = platform_get_drvdata(ptp_dev); + put_device(&ptp_dev->dev); + }
if (ptp) info->phc_index = ptp->phc_index; diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index a856047f1dfd..0ad3ea380e9b 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -1179,19 +1179,29 @@ static int enetc_pf_register_with_ierb(struct pci_dev *pdev) { struct platform_device *ierb_pdev; struct device_node *ierb_node; + int ret;
ierb_node = of_find_compatible_node(NULL, NULL, "fsl,ls1028a-enetc-ierb"); - if (!ierb_node || !of_device_is_available(ierb_node)) + if (!ierb_node) return -ENODEV;
+ if (!of_device_is_available(ierb_node)) { + of_node_put(ierb_node); + return -ENODEV; + } + ierb_pdev = of_find_device_by_node(ierb_node); of_node_put(ierb_node);
if (!ierb_pdev) return -EPROBE_DEFER;
- return enetc_ierb_register_pf(ierb_pdev, pdev); + ret = enetc_ierb_register_pf(ierb_pdev, pdev); + + put_device(&ierb_pdev->dev); + + return ret; }
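The dpaa, gianfar, enetc and (below) icss_iep hunks all plug the same class of leak: of_find_compatible_node(), of_parse_phandle() and of_find_device_by_node() return counted references that the caller must drop with of_node_put()/put_device(). A condensed sketch of the balanced pattern (the function name and compatible string are placeholders):

#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

/* Illustrative only: find a device by a made-up compatible string and drop
 * every reference we took, on both the error and success paths.
 */
static int lookup_example_dev(void)
{
        struct device_node *np;
        struct platform_device *pdev;

        np = of_find_compatible_node(NULL, NULL, "vendor,example-block");
        if (!np)
                return -ENODEV;

        if (!of_device_is_available(np)) {
                of_node_put(np);                /* drop the node reference */
                return -ENODEV;
        }

        pdev = of_find_device_by_node(np);
        of_node_put(np);                        /* node no longer needed */
        if (!pdev)
                return -EPROBE_DEFER;

        /* ... use pdev or its drvdata here ... */

        put_device(&pdev->dev);                 /* drop the device reference */
        return 0;
}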
static struct enetc_si *enetc_psi_create(struct pci_dev *pdev) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 291c88a76a27..2a8b5429df59 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3069,27 +3069,25 @@ static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us) static void fec_enet_itr_coal_set(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - int rx_itr, tx_itr; + u32 rx_itr = 0, tx_itr = 0; + int rx_ictt, tx_ictt;
- /* Must be greater than zero to avoid unpredictable behavior */ - if (!fep->rx_time_itr || !fep->rx_pkts_itr || - !fep->tx_time_itr || !fep->tx_pkts_itr) - return; - - /* Select enet system clock as Interrupt Coalescing - * timer Clock Source - */ - rx_itr = FEC_ITR_CLK_SEL; - tx_itr = FEC_ITR_CLK_SEL; + rx_ictt = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr); + tx_ictt = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
- /* set ICFT and ICTT */ - rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr); - rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr)); - tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr); - tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr)); + if (rx_ictt > 0 && fep->rx_pkts_itr > 1) { + /* Enable with enet system clock as Interrupt Coalescing timer Clock Source */ + rx_itr = FEC_ITR_EN | FEC_ITR_CLK_SEL; + rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr); + rx_itr |= FEC_ITR_ICTT(rx_ictt); + }
- rx_itr |= FEC_ITR_EN; - tx_itr |= FEC_ITR_EN; + if (tx_ictt > 0 && fep->tx_pkts_itr > 1) { + /* Enable with enet system clock as Interrupt Coalescing timer Clock Source */ + tx_itr = FEC_ITR_EN | FEC_ITR_CLK_SEL; + tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr); + tx_itr |= FEC_ITR_ICTT(tx_ictt); + }
writel(tx_itr, fep->hwp + FEC_TXIC0); writel(rx_itr, fep->hwp + FEC_RXIC0); diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 7a15b9245698..30c7bc019d2f 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -1468,8 +1468,10 @@ static int gfar_get_ts_info(struct net_device *dev, if (ptp_node) { ptp_dev = of_find_device_by_node(ptp_node); of_node_put(ptp_node); - if (ptp_dev) + if (ptp_dev) { ptp = platform_get_drvdata(ptp_dev); + put_device(&ptp_dev->dev); + } }
if (ptp) diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c index 79db7a6d42bc..9c50febb4271 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.c +++ b/drivers/net/ethernet/google/gve/gve_adminq.c @@ -431,6 +431,7 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv, break; default: dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode); + return -EINVAL; }
return 0; diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index ec189f0703f9..241a541b8edd 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -2373,6 +2373,8 @@ static void gve_shutdown(struct pci_dev *pdev) struct gve_priv *priv = netdev_priv(netdev); bool was_up = netif_carrier_ok(priv->dev);
+ netif_device_detach(netdev); + rtnl_lock(); if (was_up && gve_close(priv->dev)) { /* If the dev was up, attempt to close, if close fails, reset */ diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 11543db4c47f..3e1408e1c1fc 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -6772,6 +6772,13 @@ static int igc_probe(struct pci_dev *pdev, adapter->port_num = hw->bus.func; adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+ /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + /* Disable ASPM L1.2 on I226 devices to avoid packet loss */ if (igc_is_device_id_i226(hw)) pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2); @@ -6797,13 +6804,6 @@ static int igc_probe(struct pci_dev *pdev, netdev->mem_start = pci_resource_start(pdev, 0); netdev->mem_end = pci_resource_end(pdev, 0);
- /* PCI config space info */ - hw->vendor_id = pdev->vendor; - hw->device_id = pdev->device; - hw->revision_id = pdev->revision; - hw->subsystem_vendor_id = pdev->subsystem_vendor; - hw->subsystem_device_id = pdev->subsystem_device; - /* Copy the default MAC and PHY function pointers */ memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index 1703c640a434..7ef82c30e857 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -403,7 +403,7 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) dma_addr_t dma; u32 cmd_type;
- while (budget-- > 0) { + while (likely(budget)) { if (unlikely(!ixgbe_desc_unused(xdp_ring))) { work_done = false; break; @@ -438,6 +438,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) xdp_ring->next_to_use++; if (xdp_ring->next_to_use == xdp_ring->count) xdp_ring->next_to_use = 0; + + budget--; }
if (tx_desc) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c index 237f82082ebe..0f4e462d39c2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c @@ -580,8 +580,8 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf) if (!npc_check_field(rvu, blkaddr, NPC_LB, intf)) *features &= ~BIT_ULL(NPC_OUTER_VID);
- /* Set SPI flag only if AH/ESP and IPSEC_SPI are in the key */ - if (npc_check_field(rvu, blkaddr, NPC_IPSEC_SPI, intf) && + /* Allow extracting SPI field from AH and ESP headers at same offset */ + if (npc_is_field_present(rvu, NPC_IPSEC_SPI, intf) && (*features & (BIT_ULL(NPC_IPPROTO_ESP) | BIT_ULL(NPC_IPPROTO_AH)))) *features |= BIT_ULL(NPC_IPSEC_SPI);
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c index 889fd26843e6..11e16c9e4e92 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c @@ -101,7 +101,9 @@ mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_i if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)) return -1;
+ rcu_read_lock(); err = dev_fill_forward_path(dev, addr, &stack); + rcu_read_unlock(); if (err) return err;
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c index 85a9ad2b86bf..525e2d365cd9 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed.c +++ b/drivers/net/ethernet/mediatek/mtk_wed.c @@ -1886,7 +1886,6 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, if (!pdev) goto err_of_node_put;
- get_device(&pdev->dev); irq = platform_get_irq(pdev, 0); if (irq < 0) goto err_put_device; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c index 5ae787656a7c..3efa8bf1d14e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c @@ -272,8 +272,8 @@ static int port_update_shared_buffer(struct mlx5_core_dev *mdev, /* Total shared buffer size is split in a ratio of 3:1 between * lossy and lossless pools respectively. */ - lossy_epool_size = (shared_buffer_size / 4) * 3; lossless_ipool_size = shared_buffer_size / 4; + lossy_epool_size = shared_buffer_size - lossless_ipool_size;
mlx5e_port_set_sbpr(mdev, 0, MLX5_EGRESS_DIR, MLX5_LOSSY_POOL, 0, lossy_epool_size); @@ -288,14 +288,12 @@ static int port_set_buffer(struct mlx5e_priv *priv, u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz; struct mlx5_core_dev *mdev = priv->mdev; int sz = MLX5_ST_SZ_BYTES(pbmc_reg); - u32 new_headroom_size = 0; - u32 current_headroom_size; + u32 current_headroom_cells = 0; + u32 new_headroom_cells = 0; void *in; int err; int i;
- current_headroom_size = port_buffer->headroom_size; - in = kzalloc(sz, GFP_KERNEL); if (!in) return -ENOMEM; @@ -306,12 +304,14 @@ static int port_set_buffer(struct mlx5e_priv *priv,
for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) { void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]); + current_headroom_cells += MLX5_GET(bufferx_reg, buffer, size); + u64 size = port_buffer->buffer[i].size; u64 xoff = port_buffer->buffer[i].xoff; u64 xon = port_buffer->buffer[i].xon;
- new_headroom_size += size; do_div(size, port_buff_cell_sz); + new_headroom_cells += size; do_div(xoff, port_buff_cell_sz); do_div(xon, port_buff_cell_sz); MLX5_SET(bufferx_reg, buffer, size, size); @@ -320,10 +320,8 @@ static int port_set_buffer(struct mlx5e_priv *priv, MLX5_SET(bufferx_reg, buffer, xon_threshold, xon); }
- new_headroom_size /= port_buff_cell_sz; - current_headroom_size /= port_buff_cell_sz; - err = port_update_shared_buffer(priv->mdev, current_headroom_size, - new_headroom_size); + err = port_update_shared_buffer(priv->mdev, current_headroom_cells, + new_headroom_cells); if (err) goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c index d9acc37afe1c..74729bf168b1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c @@ -362,7 +362,7 @@ void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_que void mlx5e_reset_qdisc(struct net_device *dev, u16 qid) { struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid); - struct Qdisc *qdisc = dev_queue->qdisc_sleeping; + struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
if (!qdisc) return; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c index d8e739cbcbce..91319b5acd3d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c @@ -47,10 +47,12 @@ static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch * devlink_port_attrs_pci_vf_set(dl_port, controller_num, pfnum, vport_num - 1, external); } else if (mlx5_core_is_ec_vf_vport(esw->dev, vport_num)) { + u16 base_vport = mlx5_core_ec_vf_vport_base(dev); + memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len); dl_port->attrs.switch_id.id_len = ppid.id_len; devlink_port_attrs_pci_vf_set(dl_port, 0, pfnum, - vport_num - 1, false); + vport_num - base_vport, false); } }
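The port_buffer.c hunk a few files up changes the 3:1 shared-buffer split so the lossy pool is the remainder after carving out the lossless quarter, instead of (size / 4) * 3; with integer division the old form could leave cells assigned to neither pool. A two-line demonstration (plain C, arbitrary cell count):

#include <stdio.h>

int main(void)
{
        unsigned int shared = 1003;                     /* arbitrary cell count */
        unsigned int lossless = shared / 4;             /* 250 */
        unsigned int lossy_old = (shared / 4) * 3;      /* 750: 3 cells unused */
        unsigned int lossy_new = shared - lossless;     /* 753: nothing unused */

        printf("old total %u, new total %u (of %u)\n",
               lossless + lossy_old, lossless + lossy_new, shared);
        return 0;
}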
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 9dbd5edff0b0..51f49510826a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -2528,6 +2528,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { ROUTER_EXP, false), MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD, ROUTER_EXP, false), + MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_LINK_LOCAL, FORWARD, + ROUTER_EXP, false), /* Multicast Router Traps */ MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 83477c8e6971..5bfc1499347a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -95,6 +95,7 @@ enum { MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_SIP_BC = 0x16A, MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_DIP_LOCAL_NET = 0x16B, MLXSW_TRAP_ID_DISCARD_ING_ROUTER_DIP_LINK_LOCAL = 0x16C, + MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_LINK_LOCAL = 0x16D, MLXSW_TRAP_ID_DISCARD_ROUTER_IRIF_EN = 0x178, MLXSW_TRAP_ID_DISCARD_ROUTER_ERIF_EN = 0x179, MLXSW_TRAP_ID_DISCARD_ROUTER_LPM4 = 0x17B, diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index bc7c5cd38596..1ac7a40fcc43 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -3394,10 +3394,6 @@ void ionic_lif_free(struct ionic_lif *lif) lif->info = NULL; lif->info_pa = 0;
- /* unmap doorbell page */ - ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); - lif->kern_dbpage = NULL; - mutex_destroy(&lif->config_lock); mutex_destroy(&lif->queue_lock);
@@ -3423,6 +3419,9 @@ void ionic_lif_deinit(struct ionic_lif *lif) ionic_lif_qcq_deinit(lif, lif->notifyqcq); ionic_lif_qcq_deinit(lif, lif->adminqcq);
+ ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); + lif->kern_dbpage = NULL; + ionic_lif_reset(lif); }
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c index f3315c651515..e7306ed52922 100644 --- a/drivers/net/ethernet/ti/icssg/icss_iep.c +++ b/drivers/net/ethernet/ti/icssg/icss_iep.c @@ -638,7 +638,8 @@ static int icss_iep_pps_enable(struct icss_iep *iep, int on)
static int icss_iep_extts_enable(struct icss_iep *iep, u32 index, int on) { - u32 val, cap, ret = 0; + u32 val, cap; + int ret = 0;
mutex_lock(&iep->ptp_clk_mutex);
@@ -702,10 +703,16 @@ struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx) struct platform_device *pdev; struct device_node *iep_np; struct icss_iep *iep; + int ret;
iep_np = of_parse_phandle(np, "ti,iep", idx); - if (!iep_np || !of_device_is_available(iep_np)) + if (!iep_np) + return ERR_PTR(-ENODEV); + + if (!of_device_is_available(iep_np)) { + of_node_put(iep_np); return ERR_PTR(-ENODEV); + }
pdev = of_find_device_by_node(iep_np); of_node_put(iep_np); @@ -715,21 +722,28 @@ struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx) return ERR_PTR(-EPROBE_DEFER);
iep = platform_get_drvdata(pdev); - if (!iep) - return ERR_PTR(-EPROBE_DEFER); + if (!iep) { + ret = -EPROBE_DEFER; + goto err_put_pdev; + }
device_lock(iep->dev); if (iep->client_np) { device_unlock(iep->dev); dev_err(iep->dev, "IEP is already acquired by %s", iep->client_np->name); - return ERR_PTR(-EBUSY); + ret = -EBUSY; + goto err_put_pdev; } iep->client_np = np; device_unlock(iep->dev); - get_device(iep->dev);
return iep; + +err_put_pdev: + put_device(&pdev->dev); + + return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(icss_iep_get_idx);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 4c12067b07f0..f07bde6695c7 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -1061,6 +1061,7 @@ struct net_device_context { struct net_device __rcu *vf_netdev; struct netvsc_vf_pcpu_stats __percpu *vf_stats; struct delayed_work vf_takeover; + struct delayed_work vfns_work;
/* 1: allocated, serial number is valid. 0: not allocated */ u32 vf_alloc; @@ -1075,6 +1076,8 @@ struct net_device_context { struct netvsc_device_info *saved_netvsc_dev_info; };
+void netvsc_vfns_work(struct work_struct *w); + /* Azure hosts don't support non-TCP port numbers in hashing for fragmented * packets. We can use ethtool to change UDP hash level when necessary. */ diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index f33f9167ba6b..aa114240e340 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -2513,6 +2513,7 @@ static int netvsc_probe(struct hv_device *dev, spin_lock_init(&net_device_ctx->lock); INIT_LIST_HEAD(&net_device_ctx->reconfig_events); INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup); + INIT_DELAYED_WORK(&net_device_ctx->vfns_work, netvsc_vfns_work);
net_device_ctx->vf_stats = netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats); @@ -2655,6 +2656,8 @@ static void netvsc_remove(struct hv_device *dev) cancel_delayed_work_sync(&ndev_ctx->dwork);
rtnl_lock(); + cancel_delayed_work_sync(&ndev_ctx->vfns_work); + nvdev = rtnl_dereference(ndev_ctx->nvdev); if (nvdev) { cancel_work_sync(&nvdev->subchan_work); @@ -2696,6 +2699,7 @@ static int netvsc_suspend(struct hv_device *dev) cancel_delayed_work_sync(&ndev_ctx->dwork);
rtnl_lock(); + cancel_delayed_work_sync(&ndev_ctx->vfns_work);
nvdev = rtnl_dereference(ndev_ctx->nvdev); if (nvdev == NULL) { @@ -2789,6 +2793,27 @@ static void netvsc_event_set_vf_ns(struct net_device *ndev) } }
+void netvsc_vfns_work(struct work_struct *w) +{ + struct net_device_context *ndev_ctx = + container_of(w, struct net_device_context, vfns_work.work); + struct net_device *ndev; + + if (!rtnl_trylock()) { + schedule_delayed_work(&ndev_ctx->vfns_work, 1); + return; + } + + ndev = hv_get_drvdata(ndev_ctx->device_ctx); + if (!ndev) + goto out; + + netvsc_event_set_vf_ns(ndev); + +out: + rtnl_unlock(); +} + /* * On Hyper-V, every VF interface is matched with a corresponding * synthetic interface. The synthetic interface is presented first @@ -2799,10 +2824,12 @@ static int netvsc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); + struct net_device_context *ndev_ctx; int ret = 0;
if (event_dev->netdev_ops == &device_ops && event == NETDEV_REGISTER) { - netvsc_event_set_vf_ns(event_dev); + ndev_ctx = netdev_priv(event_dev); + schedule_delayed_work(&ndev_ctx->vfns_work, 0); return NOTIFY_DONE; }
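The netvsc change above defers VF namespace handling from the netdev notifier to delayed work, and the work takes RTNL with rtnl_trylock(), rescheduling itself if the lock is contended. A generic sketch of that trylock-and-retry idiom (my_ctx/my_work are placeholders; rtnl_trylock(), rtnl_unlock() and schedule_delayed_work() are the real APIs):

#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

struct my_ctx {
        struct delayed_work work;
};

/* Illustrative only: do the real work under RTNL, and if the lock is
 * contended, bounce the work one jiffy into the future instead of blocking.
 */
static void my_work(struct work_struct *w)
{
        struct my_ctx *ctx = container_of(w, struct my_ctx, work.work);

        if (!rtnl_trylock()) {
                schedule_delayed_work(&ctx->work, 1);
                return;
        }

        /* ... RTNL-protected work goes here ... */

        rtnl_unlock();
}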
diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c index 5620dc271fac..cbf3d4761ce3 100644 --- a/drivers/net/ipa/ipa_smp2p.c +++ b/drivers/net/ipa/ipa_smp2p.c @@ -92,7 +92,7 @@ static void ipa_smp2p_notify(struct ipa_smp2p *smp2p) return;
dev = &smp2p->ipa->pdev->dev; - smp2p->power_on = pm_runtime_get_if_active(dev, true) > 0; + smp2p->power_on = pm_runtime_get_if_active(dev) > 0;
/* Signal whether the IPA power is enabled */ mask = BIT(smp2p->enabled_bit); diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 9a0432145645..6a114883ed8c 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -372,6 +372,8 @@ static const struct kszphy_type ksz8051_type = {
static const struct kszphy_type ksz8081_type = { .led_mode_reg = MII_KSZPHY_CTRL_2, + .cable_diag_reg = KSZ8081_LMD, + .pair_mask = KSZPHY_WIRE_PAIR_MASK, .has_broadcast_disable = true, .has_nand_tree_disable = true, .has_rmii_ref_clk_sel = true, @@ -4720,6 +4722,14 @@ static int lan8841_suspend(struct phy_device *phydev) return genphy_suspend(phydev); }
+static int ksz9131_resume(struct phy_device *phydev) +{ + if (phydev->suspended && phy_interface_is_rgmii(phydev)) + ksz9131_config_rgmii_delay(phydev); + + return kszphy_resume(phydev); +} + static struct phy_driver ksphy_driver[] = { { .phy_id = PHY_ID_KS8737, @@ -4966,7 +4976,7 @@ static struct phy_driver ksphy_driver[] = { .get_strings = kszphy_get_strings, .get_stats = kszphy_get_stats, .suspend = kszphy_suspend, - .resume = kszphy_resume, + .resume = ksz9131_resume, .cable_test_start = ksz9x31_cable_test_start, .cable_test_get_status = ksz9x31_cable_test_get_status, .get_features = ksz9477_get_features, diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h index 7a962050a4d4..cdb343779a8f 100644 --- a/drivers/net/phy/mscc/mscc.h +++ b/drivers/net/phy/mscc/mscc.h @@ -362,6 +362,13 @@ struct vsc85xx_hw_stat { u16 mask; };
+struct vsc8531_skb_cb { + u32 ns; +}; + +#define VSC8531_SKB_CB(skb) \ + ((struct vsc8531_skb_cb *)((skb)->cb)) + struct vsc8531_private { int rate_magic; u16 supp_led_modes; @@ -410,6 +417,11 @@ struct vsc8531_private { */ struct mutex ts_lock; struct mutex phc_lock; + + /* list of skbs that were received and need timestamp information but + * haven't received it yet + */ + struct sk_buff_head rx_skbs_list; };
/* Shared structure between the PHYs of the same package. diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index 4171f01d34e5..3de72d9cc22b 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -2335,6 +2335,13 @@ static int vsc85xx_probe(struct phy_device *phydev) return vsc85xx_dt_led_modes_get(phydev, default_mode); }
+static void vsc85xx_remove(struct phy_device *phydev) +{ + struct vsc8531_private *priv = phydev->priv; + + skb_queue_purge(&priv->rx_skbs_list); +} + /* Microsemi VSC85xx PHYs */ static struct phy_driver vsc85xx_driver[] = { { @@ -2589,6 +2596,7 @@ static struct phy_driver vsc85xx_driver[] = { .config_intr = &vsc85xx_config_intr, .suspend = &genphy_suspend, .resume = &genphy_resume, + .remove = &vsc85xx_remove, .probe = &vsc8574_probe, .set_wol = &vsc85xx_wol_set, .get_wol = &vsc85xx_wol_get, @@ -2614,6 +2622,7 @@ static struct phy_driver vsc85xx_driver[] = { .config_intr = &vsc85xx_config_intr, .suspend = &genphy_suspend, .resume = &genphy_resume, + .remove = &vsc85xx_remove, .probe = &vsc8574_probe, .set_wol = &vsc85xx_wol_set, .get_wol = &vsc85xx_wol_get, @@ -2639,6 +2648,7 @@ static struct phy_driver vsc85xx_driver[] = { .config_intr = &vsc85xx_config_intr, .suspend = &genphy_suspend, .resume = &genphy_resume, + .remove = &vsc85xx_remove, .probe = &vsc8584_probe, .get_tunable = &vsc85xx_get_tunable, .set_tunable = &vsc85xx_set_tunable, @@ -2662,6 +2672,7 @@ static struct phy_driver vsc85xx_driver[] = { .config_intr = &vsc85xx_config_intr, .suspend = &genphy_suspend, .resume = &genphy_resume, + .remove = &vsc85xx_remove, .probe = &vsc8584_probe, .get_tunable = &vsc85xx_get_tunable, .set_tunable = &vsc85xx_set_tunable, @@ -2685,6 +2696,7 @@ static struct phy_driver vsc85xx_driver[] = { .config_intr = &vsc85xx_config_intr, .suspend = &genphy_suspend, .resume = &genphy_resume, + .remove = &vsc85xx_remove, .probe = &vsc8584_probe, .get_tunable = &vsc85xx_get_tunable, .set_tunable = &vsc85xx_set_tunable, diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c index d0bd6ab45ebe..add1a9ee721a 100644 --- a/drivers/net/phy/mscc/mscc_ptp.c +++ b/drivers/net/phy/mscc/mscc_ptp.c @@ -1193,9 +1193,7 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts, { struct vsc8531_private *vsc8531 = container_of(mii_ts, struct vsc8531_private, mii_ts); - struct skb_shared_hwtstamps *shhwtstamps = NULL; struct vsc85xx_ptphdr *ptphdr; - struct timespec64 ts; unsigned long ns;
if (!vsc8531->ptp->configured) @@ -1205,27 +1203,52 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts, type == PTP_CLASS_NONE) return false;
- vsc85xx_gettime(&vsc8531->ptp->caps, &ts); - ptphdr = get_ptp_header_rx(skb, vsc8531->ptp->rx_filter); if (!ptphdr) return false;
- shhwtstamps = skb_hwtstamps(skb); - memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); - ns = ntohl(ptphdr->rsrvd2);
- /* nsec is in reserved field */ - if (ts.tv_nsec < ns) - ts.tv_sec--; + VSC8531_SKB_CB(skb)->ns = ns; + skb_queue_tail(&vsc8531->rx_skbs_list, skb);
- shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ns); - netif_rx(skb); + ptp_schedule_worker(vsc8531->ptp->ptp_clock, 0);
return true; }
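The mscc PTP rework above stops reading the PHC time in the rx-timestamp hook; instead it stashes the packet's nanosecond field in skb->cb, queues the skb, and lets the PTP aux worker complete the timestamp. skb->cb is a 48-byte scratch area that the current owner of an skb may overlay with its own struct; a minimal sketch of that overlay (my_skb_cb/MY_SKB_CB are placeholder names):

#include <linux/skbuff.h>
#include <linux/build_bug.h>
#include <linux/stddef.h>

struct my_skb_cb {
        u32 ns;                 /* e.g. nanoseconds carried in the packet */
};

#define MY_SKB_CB(skb) ((struct my_skb_cb *)((skb)->cb))

static void my_stash(struct sk_buff *skb, u32 ns)
{
        /* skb->cb is only 48 bytes; make sure the overlay fits. */
        BUILD_BUG_ON(sizeof(struct my_skb_cb) >
                     sizeof_field(struct sk_buff, cb));

        MY_SKB_CB(skb)->ns = ns;
}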
+static long vsc85xx_do_aux_work(struct ptp_clock_info *info) +{ + struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps); + struct skb_shared_hwtstamps *shhwtstamps = NULL; + struct phy_device *phydev = ptp->phydev; + struct vsc8531_private *priv = phydev->priv; + struct sk_buff_head received; + struct sk_buff *rx_skb; + struct timespec64 ts; + unsigned long flags; + + __skb_queue_head_init(&received); + spin_lock_irqsave(&priv->rx_skbs_list.lock, flags); + skb_queue_splice_tail_init(&priv->rx_skbs_list, &received); + spin_unlock_irqrestore(&priv->rx_skbs_list.lock, flags); + + vsc85xx_gettime(info, &ts); + while ((rx_skb = __skb_dequeue(&received)) != NULL) { + shhwtstamps = skb_hwtstamps(rx_skb); + memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); + + if (ts.tv_nsec < VSC8531_SKB_CB(rx_skb)->ns) + ts.tv_sec--; + + shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, + VSC8531_SKB_CB(rx_skb)->ns); + netif_rx(rx_skb); + } + + return -1; +} + static const struct ptp_clock_info vsc85xx_clk_caps = { .owner = THIS_MODULE, .name = "VSC85xx timer", @@ -1239,6 +1262,7 @@ static const struct ptp_clock_info vsc85xx_clk_caps = { .adjfine = &vsc85xx_adjfine, .gettime64 = &vsc85xx_gettime, .settime64 = &vsc85xx_settime, + .do_aux_work = &vsc85xx_do_aux_work, };
static struct vsc8531_private *vsc8584_base_priv(struct phy_device *phydev) @@ -1566,6 +1590,7 @@ int vsc8584_ptp_probe(struct phy_device *phydev)
mutex_init(&vsc8531->phc_lock); mutex_init(&vsc8531->ts_lock); + skb_queue_head_init(&vsc8531->rx_skbs_list);
/* Retrieve the shared load/save GPIO. Request it as non exclusive as * the same GPIO can be requested by all the PHYs of the same package. diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index 4ca813c00947..5f9bb0ebe455 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -786,6 +786,7 @@ static struct phy_driver smsc_phy_driver[] = {
/* PHY_BASIC_FEATURES */
+ .flags = PHY_RST_AFTER_CLK_EN, .probe = smsc_phy_probe,
/* basic functions */ diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index ee1527cf3d0c..28b894bcd7a9 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -33,6 +33,7 @@ #include <linux/ppp_channel.h> #include <linux/ppp-comp.h> #include <linux/skbuff.h> +#include <linux/rculist.h> #include <linux/rtnetlink.h> #include <linux/if_arp.h> #include <linux/ip.h> @@ -1613,11 +1614,14 @@ static int ppp_fill_forward_path(struct net_device_path_ctx *ctx, if (ppp->flags & SC_MULTILINK) return -EOPNOTSUPP;
- if (list_empty(&ppp->channels)) + pch = list_first_or_null_rcu(&ppp->channels, struct channel, clist); + if (!pch) + return -ENODEV; + + chan = READ_ONCE(pch->chan); + if (!chan) return -ENODEV;
- pch = list_first_entry(&ppp->channels, struct channel, clist); - chan = pch->chan; if (!chan->ops->fill_forward_path) return -EOPNOTSUPP;
@@ -3000,7 +3004,7 @@ ppp_unregister_channel(struct ppp_channel *chan) */ down_write(&pch->chan_sem); spin_lock_bh(&pch->downl); - pch->chan = NULL; + WRITE_ONCE(pch->chan, NULL); spin_unlock_bh(&pch->downl); up_write(&pch->chan_sem); ppp_disconnect_channel(pch); @@ -3506,7 +3510,7 @@ ppp_connect_channel(struct channel *pch, int unit) hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */ if (hdrlen > ppp->dev->hard_header_len) ppp->dev->hard_header_len = hdrlen; - list_add_tail(&pch->clist, &ppp->channels); + list_add_tail_rcu(&pch->clist, &ppp->channels); ++ppp->n_channels; pch->ppp = ppp; refcount_inc(&ppp->file.refcnt); @@ -3536,10 +3540,11 @@ ppp_disconnect_channel(struct channel *pch) if (ppp) { /* remove it from the ppp unit's list */ ppp_lock(ppp); - list_del(&pch->clist); + list_del_rcu(&pch->clist); if (--ppp->n_channels == 0) wake_up_interruptible(&ppp->file.rwait); ppp_unlock(ppp); + synchronize_net(); if (refcount_dec_and_test(&ppp->file.refcnt)) ppp_destroy_interface(ppp); err = 0; diff --git a/drivers/net/thunderbolt/main.c b/drivers/net/thunderbolt/main.c index 0a53ec293d04..dcaa62377808 100644 --- a/drivers/net/thunderbolt/main.c +++ b/drivers/net/thunderbolt/main.c @@ -396,9 +396,9 @@ static void tbnet_tear_down(struct tbnet *net, bool send_logout)
ret = tb_xdomain_disable_paths(net->xd, net->local_transmit_path, - net->rx_ring.ring->hop, + net->tx_ring.ring->hop, net->remote_transmit_path, - net->tx_ring.ring->hop); + net->rx_ring.ring->hop); if (ret) netdev_warn(net->dev, "failed to disable DMA paths\n");
@@ -662,9 +662,9 @@ static void tbnet_connected_work(struct work_struct *work) goto err_free_rx_buffers;
ret = tb_xdomain_enable_paths(net->xd, net->local_transmit_path, - net->rx_ring.ring->hop, + net->tx_ring.ring->hop, net->remote_transmit_path, - net->tx_ring.ring->hop); + net->rx_ring.ring->hop); if (ret) { netdev_err(net->dev, "failed to enable DMA paths\n"); goto err_free_tx_buffers; @@ -924,8 +924,12 @@ static int tbnet_open(struct net_device *dev)
netif_carrier_off(dev);
- ring = tb_ring_alloc_tx(xd->tb->nhi, -1, TBNET_RING_SIZE, - RING_FLAG_FRAME); + flags = RING_FLAG_FRAME; + /* Only enable full E2E if the other end supports it too */ + if (tbnet_e2e && net->svc->prtcstns & TBNET_E2E) + flags |= RING_FLAG_E2E; + + ring = tb_ring_alloc_tx(xd->tb->nhi, -1, TBNET_RING_SIZE, flags); if (!ring) { netdev_err(dev, "failed to allocate Tx ring\n"); return -ENOMEM; @@ -944,11 +948,6 @@ static int tbnet_open(struct net_device *dev) sof_mask = BIT(TBIP_PDF_FRAME_START); eof_mask = BIT(TBIP_PDF_FRAME_END);
- flags = RING_FLAG_FRAME; - /* Only enable full E2E if the other end supports it too */ - if (tbnet_e2e && net->svc->prtcstns & TBNET_E2E) - flags |= RING_FLAG_E2E; - ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE, flags, net->tx_ring.ring->hop, sof_mask, eof_mask, tbnet_start_poll, net); diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 119295f5f3b3..f4340d4ef7ee 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -676,6 +676,7 @@ static int ax88772_init_mdio(struct usbnet *dev) priv->mdio->read = &asix_mdio_bus_read; priv->mdio->write = &asix_mdio_bus_write; priv->mdio->name = "Asix MDIO Bus"; + priv->mdio->phy_mask = ~(BIT(priv->phy_addr & 0x1f) | BIT(AX_EMBD_PHY_ADDR)); /* mii bus name is usb-<usb bus number>-<usb device number> */ snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d", dev->udev->bus->busnum, dev->udev->devnum); diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index db05622f1f70..d9792fd515a9 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -893,6 +893,10 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ } }
+ if (ctx->func_desc) + ctx->filtering_supported = !!(ctx->func_desc->bmNetworkCapabilities + & USB_CDC_NCM_NCAP_ETH_FILTER); + iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
/* Device-specific flags */ @@ -1898,6 +1902,14 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb) } }
+static void cdc_ncm_update_filter(struct usbnet *dev) +{ + struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; + + if (ctx->filtering_supported) + usbnet_cdc_update_filter(dev); +} + static const struct driver_info cdc_ncm_info = { .description = "CDC NCM (NO ZLP)", .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET @@ -1908,7 +1920,7 @@ static const struct driver_info cdc_ncm_info = { .status = cdc_ncm_status, .rx_fixup = cdc_ncm_rx_fixup, .tx_fixup = cdc_ncm_tx_fixup, - .set_rx_mode = usbnet_cdc_update_filter, + .set_rx_mode = cdc_ncm_update_filter, };
/* Same as cdc_ncm_info, but with FLAG_SEND_ZLP */ @@ -1922,7 +1934,7 @@ static const struct driver_info cdc_ncm_zlp_info = { .status = cdc_ncm_status, .rx_fixup = cdc_ncm_rx_fixup, .tx_fixup = cdc_ncm_tx_fixup, - .set_rx_mode = usbnet_cdc_update_filter, + .set_rx_mode = cdc_ncm_update_filter, };
/* Same as cdc_ncm_info, but with FLAG_WWAN */ @@ -1936,7 +1948,7 @@ static const struct driver_info wwan_info = { .status = cdc_ncm_status, .rx_fixup = cdc_ncm_rx_fixup, .tx_fixup = cdc_ncm_tx_fixup, - .set_rx_mode = usbnet_cdc_update_filter, + .set_rx_mode = cdc_ncm_update_filter, };
/* Same as wwan_info, but with FLAG_NOARP */ @@ -1950,7 +1962,7 @@ static const struct driver_info wwan_noarp_info = { .status = cdc_ncm_status, .rx_fixup = cdc_ncm_rx_fixup, .tx_fixup = cdc_ncm_tx_fixup, - .set_rx_mode = usbnet_cdc_update_filter, + .set_rx_mode = cdc_ncm_update_filter, };
static const struct usb_device_id cdc_devs[] = { diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c index 9d8efec46508..39d9aad33bc6 100644 --- a/drivers/net/wireless/ath/ath11k/ce.c +++ b/drivers/net/wireless/ath/ath11k/ce.c @@ -393,9 +393,6 @@ static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe, goto err; }
- /* Make sure descriptor is read after the head pointer. */ - dma_rmb(); - *nbytes = ath11k_hal_ce_dst_status_get_length(desc);
*skb = pipe->dest_ring->skb[sw_index]; diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c index 2b7bee666472..33b9764eaa91 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.c +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c @@ -2662,9 +2662,6 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id, try_again: ath11k_hal_srng_access_begin(ab, srng);
- /* Make sure descriptor is read after the head pointer. */ - dma_rmb(); - while (likely(desc = (struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab, srng))) { diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c index df493d176062..1215408d1a6a 100644 --- a/drivers/net/wireless/ath/ath11k/hal.c +++ b/drivers/net/wireless/ath/ath11k/hal.c @@ -796,13 +796,23 @@ u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng)
void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng) { + u32 hp; + lockdep_assert_held(&srng->lock);
if (srng->ring_dir == HAL_SRNG_DIR_SRC) { srng->u.src_ring.cached_tp = *(volatile u32 *)srng->u.src_ring.tp_addr; } else { - srng->u.dst_ring.cached_hp = READ_ONCE(*srng->u.dst_ring.hp_addr); + hp = READ_ONCE(*srng->u.dst_ring.hp_addr); + + if (hp != srng->u.dst_ring.cached_hp) { + srng->u.dst_ring.cached_hp = hp; + /* Make sure descriptor is read after the head + * pointer. + */ + dma_rmb(); + }
/* Try to prefetch the next descriptor in the ring */ if (srng->flags & HAL_SRNG_FLAGS_CACHED) @@ -817,7 +827,6 @@ void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng) { lockdep_assert_held(&srng->lock);
- /* TODO: See if we need a write memory barrier here */ if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) { /* For LMAC rings, ring pointer updates are done through FW and * hence written to a shared memory location that is read by FW @@ -825,21 +834,37 @@ void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng) if (srng->ring_dir == HAL_SRNG_DIR_SRC) { srng->u.src_ring.last_tp = *(volatile u32 *)srng->u.src_ring.tp_addr; - *srng->u.src_ring.hp_addr = srng->u.src_ring.hp; + /* Make sure descriptor is written before updating the + * head pointer. + */ + dma_wmb(); + WRITE_ONCE(*srng->u.src_ring.hp_addr, srng->u.src_ring.hp); } else { srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr; - *srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp; + /* Make sure descriptor is read before updating the + * tail pointer. + */ + dma_mb(); + WRITE_ONCE(*srng->u.dst_ring.tp_addr, srng->u.dst_ring.tp); } } else { if (srng->ring_dir == HAL_SRNG_DIR_SRC) { srng->u.src_ring.last_tp = *(volatile u32 *)srng->u.src_ring.tp_addr; + /* Assume the implementation uses an MMIO write accessor + * which has the required wmb() so that the descriptor + * is written before updating the head pointer. + */ ath11k_hif_write32(ab, (unsigned long)srng->u.src_ring.hp_addr - (unsigned long)ab->mem, srng->u.src_ring.hp); } else { srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr; + /* Make sure descriptor is read before updating the + * tail pointer. + */ + mb(); ath11k_hif_write32(ab, (unsigned long)srng->u.dst_ring.tp_addr - (unsigned long)ab->mem, diff --git a/drivers/net/wireless/ath/ath12k/ce.c index 740586fe49d1..b66d23d6b2bd 100644 --- a/drivers/net/wireless/ath/ath12k/ce.c +++ b/drivers/net/wireless/ath/ath12k/ce.c @@ -343,9 +343,6 @@ static int ath12k_ce_completed_recv_next(struct ath12k_ce_pipe *pipe, goto err; }
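The ath11k/ath12k hunks above and below all apply the same DMA ring discipline: the consumer orders its read of the device-written head pointer before it reads the descriptor (dma_rmb()), and the producer makes its descriptor writes visible before publishing the new index (dma_wmb(), or the implicit barrier in the MMIO write accessor). A stripped-down illustration with invented types, not the driver's actual structures:

	#include <linux/compiler.h>
	#include <linux/types.h>
	#include <asm/barrier.h>

	struct my_desc { u32 data; };

	struct my_ring {
		struct my_desc *desc;	/* descriptor array shared with the device */
		u32 *hp_addr;		/* head index in memory written by the device */
		u32 cached_hp, tp, hp, size;
	};

	/* Consumer side (a ring the device fills, like a dst/REO ring). */
	static struct my_desc *ring_consume(struct my_ring *ring)
	{
		u32 hp = READ_ONCE(*ring->hp_addr);

		if (hp == ring->cached_hp)
			return NULL;		/* nothing new */

		ring->cached_hp = hp;
		dma_rmb();	/* read descriptors only after the new head index */

		return &ring->desc[ring->tp++ % ring->size];
	}

	/* Producer side (a ring the driver fills, like a src ring). */
	static void ring_produce(struct my_ring *ring, const struct my_desc *d)
	{
		ring->desc[ring->hp % ring->size] = *d;

		dma_wmb();	/* descriptor contents visible before the index */
		WRITE_ONCE(*ring->hp_addr, ++ring->hp);
	}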
- /* Make sure descriptor is read after the head pointer. */ - dma_rmb(); - *nbytes = ath12k_hal_ce_dst_status_get_length(desc);
*skb = pipe->dest_ring->skb[sw_index]; diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c index c663ff990b47..c918f5d12975 100644 --- a/drivers/net/wireless/ath/ath12k/dp.c +++ b/drivers/net/wireless/ath/ath12k/dp.c @@ -74,6 +74,7 @@ int ath12k_dp_peer_setup(struct ath12k *ar, int vdev_id, const u8 *addr) ret = ath12k_dp_rx_peer_frag_setup(ar, addr, vdev_id); if (ret) { ath12k_warn(ab, "failed to setup rx defrag context\n"); + tid--; goto peer_clean; }
@@ -91,7 +92,7 @@ int ath12k_dp_peer_setup(struct ath12k *ar, int vdev_id, const u8 *addr) return -ENOENT; }
- for (; tid >= 0; tid--) + for (tid--; tid >= 0; tid--) ath12k_dp_rx_peer_tid_delete(ar, peer, tid);
spin_unlock_bh(&ab->base_lock); diff --git a/drivers/net/wireless/ath/ath12k/hal.c b/drivers/net/wireless/ath/ath12k/hal.c index 169e16c6ed65..2d80cb9f0e7b 100644 --- a/drivers/net/wireless/ath/ath12k/hal.c +++ b/drivers/net/wireless/ath/ath12k/hal.c @@ -1728,13 +1728,24 @@ void *ath12k_hal_srng_src_get_next_reaped(struct ath12k_base *ab,
void ath12k_hal_srng_access_begin(struct ath12k_base *ab, struct hal_srng *srng) { + u32 hp; + lockdep_assert_held(&srng->lock);
- if (srng->ring_dir == HAL_SRNG_DIR_SRC) + if (srng->ring_dir == HAL_SRNG_DIR_SRC) { srng->u.src_ring.cached_tp = *(volatile u32 *)srng->u.src_ring.tp_addr; - else - srng->u.dst_ring.cached_hp = READ_ONCE(*srng->u.dst_ring.hp_addr); + } else { + hp = READ_ONCE(*srng->u.dst_ring.hp_addr); + + if (hp != srng->u.dst_ring.cached_hp) { + srng->u.dst_ring.cached_hp = hp; + /* Make sure descriptor is read after the head + * pointer. + */ + dma_rmb(); + } + } }
/* Update cached ring head/tail pointers to HW. ath12k_hal_srng_access_begin() @@ -1744,7 +1755,6 @@ void ath12k_hal_srng_access_end(struct ath12k_base *ab, struct hal_srng *srng) { lockdep_assert_held(&srng->lock);
- /* TODO: See if we need a write memory barrier here */ if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) { /* For LMAC rings, ring pointer updates are done through FW and * hence written to a shared memory location that is read by FW @@ -1752,21 +1762,37 @@ void ath12k_hal_srng_access_end(struct ath12k_base *ab, struct hal_srng *srng) if (srng->ring_dir == HAL_SRNG_DIR_SRC) { srng->u.src_ring.last_tp = *(volatile u32 *)srng->u.src_ring.tp_addr; - *srng->u.src_ring.hp_addr = srng->u.src_ring.hp; + /* Make sure descriptor is written before updating the + * head pointer. + */ + dma_wmb(); + WRITE_ONCE(*srng->u.src_ring.hp_addr, srng->u.src_ring.hp); } else { srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr; - *srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp; + /* Make sure descriptor is read before updating the + * tail pointer. + */ + dma_mb(); + WRITE_ONCE(*srng->u.dst_ring.tp_addr, srng->u.dst_ring.tp); } } else { if (srng->ring_dir == HAL_SRNG_DIR_SRC) { srng->u.src_ring.last_tp = *(volatile u32 *)srng->u.src_ring.tp_addr; + /* Assume the implementation uses an MMIO write accessor + * which has the required wmb() so that the descriptor + * is written before updating the head pointer. + */ ath12k_hif_write32(ab, (unsigned long)srng->u.src_ring.hp_addr - (unsigned long)ab->mem, srng->u.src_ring.hp); } else { srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr; + /* Make sure descriptor is read before updating the + * tail pointer. + */ + mb(); ath12k_hif_write32(ab, (unsigned long)srng->u.dst_ring.tp_addr - (unsigned long)ab->mem, diff --git a/drivers/net/wireless/ath/ath12k/hw.c index dafd7c34d746..97ed179be228 100644 --- a/drivers/net/wireless/ath/ath12k/hw.c +++ b/drivers/net/wireless/ath/ath12k/hw.c @@ -1002,7 +1002,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .download_calib = true, .supports_suspend = false, .tcl_ring_retry = true, - .reoq_lut_support = false, + .reoq_lut_support = true, .supports_shadow_regs = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9274), diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c index e918218ce2d6..7e400a0e0eb1 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.c +++ b/drivers/net/wireless/ath/ath12k/wmi.c @@ -4965,6 +4965,11 @@ static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id, dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
info = IEEE80211_SKB_CB(msdu); + memset(&info->status, 0, sizeof(info->status)); + + /* skip tx rate update from ieee80211_status*/ + info->status.rates[0].idx = -1; + if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status) info->flags |= IEEE80211_TX_STAT_ACK;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c index 47c0e8e429e5..3064e603e7e3 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c @@ -919,7 +919,7 @@ void wlc_lcnphy_read_table(struct brcms_phy *pi, struct phytbl_info *pti)
static void wlc_lcnphy_common_read_table(struct brcms_phy *pi, u32 tbl_id, - const u16 *tbl_ptr, u32 tbl_len, + u16 *tbl_ptr, u32 tbl_len, u32 tbl_width, u32 tbl_offset) { struct phytbl_info tab; diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index 0a4aa3c678c1..75118e240619 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -1575,8 +1575,11 @@ il4965_tx_cmd_build_rate(struct il_priv *il, || rate_idx > RATE_COUNT_LEGACY) rate_idx = rate_lowest_index(&il->bands[info->band], sta); /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ - if (info->band == NL80211_BAND_5GHZ) + if (info->band == NL80211_BAND_5GHZ) { rate_idx += IL_FIRST_OFDM_RATE; + if (rate_idx > IL_LAST_OFDM_RATE) + rate_idx = IL_LAST_OFDM_RATE; + } /* Get PLCP rate for tx_cmd->rate_n_flags */ rate_plcp = il_rates[rate_idx].plcp; /* Zero out flags for this packet */ diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c index f4a6f76cf193..e70024525eb9 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c @@ -2904,7 +2904,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv, /* Repeat initial/next rate. * For legacy IWL_NUMBER_TRY == 1, this loop will not execute. * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */ - while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) { + while (repeat_rate > 0 && index < (LINK_QUAL_MAX_RETRY_NUM - 1)) { if (is_legacy(tbl_type.lq_type)) { if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE) ant_toggle_cnt++; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 0a1f302ad6d3..2deb259615d9 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -2816,6 +2816,7 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, struct iwl_fw_dump_desc *desc; unsigned int delay = 0; bool monitor_only = false; + int ret;
if (trigger) { u16 occurrences = le16_to_cpu(trigger->occurrences) - 1; @@ -2846,7 +2847,11 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, desc->trig_desc.type = cpu_to_le32(trig); memcpy(desc->trig_desc.data, str, len);
- return iwl_fw_dbg_collect_desc(fwrt, desc, monitor_only, delay); + ret = iwl_fw_dbg_collect_desc(fwrt, desc, monitor_only, delay); + if (ret) + kfree(desc); + + return ret; } IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index a82cdd897173..6c108dbbbc54 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -2143,6 +2143,7 @@ static void iwl_mvm_convert_gtk_v2(struct iwl_wowlan_status_data *status,
status->gtk[0].len = data->key_len; status->gtk[0].flags = data->key_flags; + status->gtk[0].id = status->gtk[0].flags & IWL_WOWLAN_GTK_IDX_MASK;
memcpy(status->gtk[0].key, data->key, sizeof(data->key));
@@ -2369,6 +2370,7 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id) * currently used key. */ status->gtk[0].flags = v6->gtk.key_index | BIT(7); + status->gtk[0].id = v6->gtk.key_index; } else if (notif_ver == 7) { struct iwl_wowlan_status_v7 *v7 = (void *)cmd.resp_pkt->data;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index cc866401aad0..8b22779e5b3e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -828,7 +828,7 @@ static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids, int n_channels) { return ((n_ssids <= PROBE_OPTION_MAX) && - (n_channels <= mvm->fw->ucode_capa.n_scan_channels) & + (n_channels <= mvm->fw->ucode_capa.n_scan_channels) && (ies->common_ie_len + ies->len[NL80211_BAND_2GHZ] + ies->len[NL80211_BAND_5GHZ] + ies->len[NL80211_BAND_6GHZ] <= diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index f0226db2e57c..fae9ec98da3b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -2060,16 +2060,21 @@ static int mt7915_load_firmware(struct mt7915_dev *dev) { int ret;
- /* make sure fw is download state */ - if (mt7915_firmware_state(dev, false)) { - /* restart firmware once */ - mt76_connac_mcu_restart(&dev->mt76); - ret = mt7915_firmware_state(dev, false); - if (ret) { - dev_err(dev->mt76.dev, - "Firmware is not ready for download\n"); - return ret; - } + /* Release Semaphore if taken by previous failed attempt */ + ret = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, false); + if (ret != PATCH_REL_SEM_SUCCESS) { + dev_err(dev->mt76.dev, "Could not release semaphore\n"); + /* Continue anyways */ + } + + /* Always restart MCU firmware */ + mt76_connac_mcu_restart(&dev->mt76); + + /* Check if MCU is ready */ + ret = mt7915_firmware_state(dev, false); + if (ret) { + dev_err(dev->mt76.dev, "Firmware did not enter download state\n"); + return ret; }
ret = mt76_connac2_load_patch(&dev->mt76, fw_name_var(dev, ROM_PATCH)); diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index 3645f212021f..40112b2c3777 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -573,8 +573,11 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw, dma_map_single(&rtlpci->pdev->dev, skb_tail_pointer(skb), rtlpci->rxbuffersize, DMA_FROM_DEVICE); bufferaddress = *((dma_addr_t *)skb->cb); - if (dma_mapping_error(&rtlpci->pdev->dev, bufferaddress)) + if (dma_mapping_error(&rtlpci->pdev->dev, bufferaddress)) { + if (!new_skb) + kfree_skb(skb); return 0; + } rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb; if (rtlpriv->use_new_trx_flow) { /* skb->cb may be 64 bit address */ @@ -803,13 +806,19 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) skb = new_skb; no_new: if (rtlpriv->use_new_trx_flow) { - _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)buffer_desc, - rxring_idx, - rtlpci->rx_ring[rxring_idx].idx); + if (!_rtl_pci_init_one_rxdesc(hw, skb, (u8 *)buffer_desc, + rxring_idx, + rtlpci->rx_ring[rxring_idx].idx)) { + if (new_skb) + dev_kfree_skb_any(skb); + } } else { - _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)pdesc, - rxring_idx, - rtlpci->rx_ring[rxring_idx].idx); + if (!_rtl_pci_init_one_rxdesc(hw, skb, (u8 *)pdesc, + rxring_idx, + rtlpci->rx_ring[rxring_idx].idx)) { + if (new_skb) + dev_kfree_skb_any(skb); + } if (rtlpci->rx_ring[rxring_idx].idx == rtlpci->rxringcount - 1) rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c index 21e9ec8768b5..c172ef13c954 100644 --- a/drivers/net/wireless/realtek/rtw89/core.c +++ b/drivers/net/wireless/realtek/rtw89/core.c @@ -1996,6 +1996,9 @@ static enum rtw89_ps_mode rtw89_update_ps_mode(struct rtw89_dev *rtwdev) { const struct rtw89_chip_info *chip = rtwdev->chip;
+ if (rtwdev->hci.type != RTW89_HCI_TYPE_PCIE) + return RTW89_PS_MODE_NONE; + if (rtw89_disable_ps_mode || !chip->ps_mode_supported || RTW89_CHK_FW_FEATURE(NO_DEEP_PS, &rtwdev->fw)) return RTW89_PS_MODE_NONE; diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c index 89b0a7970508..539537360914 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.c +++ b/drivers/net/wireless/realtek/rtw89/fw.c @@ -3427,13 +3427,18 @@ static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev, const struct rtw89_chip_info *chip = rtwdev->chip; struct rtw89_fw_info *fw_info = &rtwdev->fw; const u32 *c2h_reg = chip->c2h_regs; - u32 ret; + u32 ret, timeout; u8 i, val;
info->id = RTW89_FWCMD_C2HREG_FUNC_NULL;
+ if (rtwdev->hci.type == RTW89_HCI_TYPE_USB) + timeout = RTW89_C2H_TIMEOUT_USB; + else + timeout = RTW89_C2H_TIMEOUT; + ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1, - RTW89_C2H_TIMEOUT, false, rtwdev, + timeout, false, rtwdev, chip->c2h_ctrl_reg); if (ret) { rtw89_warn(rtwdev, "c2h reg timeout\n"); diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h index 775f4e8fbda4..bc6a9ea9352e 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.h +++ b/drivers/net/wireless/realtek/rtw89/fw.h @@ -69,6 +69,8 @@ struct rtw89_h2creg_sch_tx_en { #define RTW89_C2HREG_HDR_LEN 2 #define RTW89_H2CREG_HDR_LEN 2 #define RTW89_C2H_TIMEOUT 1000000 +#define RTW89_C2H_TIMEOUT_USB 4000 + struct rtw89_mac_c2h_info { u8 id; u8 content_len; diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index 3c818c4b4653..3d63f8b2770e 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -1301,6 +1301,23 @@ void rtw89_mac_notify_wake(struct rtw89_dev *rtwdev) rtw89_mac_send_rpwm(rtwdev, state, true); }
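For reference, read_poll_timeout_atomic() (from <linux/iopoll.h>) busy-polls an accessor until the condition holds or the timeout, given in microseconds, expires, so the hunk above only changes the polling budget used on USB devices. A hypothetical usage sketch, reusing the driver's rtw89_read8() accessor; the register and bit are made up:

	#include <linux/bits.h>
	#include <linux/iopoll.h>

	/* Hypothetical: wait up to 4 ms for bit 0 of a status register. */
	static int wait_for_ready(struct rtw89_dev *rtwdev, u32 reg)
	{
		u8 val;

		/* poll rtw89_read8() every 1 us, give up after 4000 us */
		return read_poll_timeout_atomic(rtw89_read8, val, val & BIT(0),
						1, 4000, false, rtwdev, reg);
	}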
+static void rtw89_mac_power_switch_boot_mode(struct rtw89_dev *rtwdev) +{ + u32 boot_mode; + + if (rtwdev->hci.type != RTW89_HCI_TYPE_USB) + return; + + boot_mode = rtw89_read32_mask(rtwdev, R_AX_GPIO_MUXCFG, B_AX_BOOT_MODE); + if (!boot_mode) + return; + + rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFN_ONMAC); + rtw89_write32_clr(rtwdev, R_AX_SYS_STATUS1, B_AX_AUTO_WLPON); + rtw89_write32_clr(rtwdev, R_AX_GPIO_MUXCFG, B_AX_BOOT_MODE); + rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); +} + static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on) { #define PWR_ACT 1 @@ -1310,6 +1327,8 @@ static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on) int ret; u8 val;
+ rtw89_mac_power_switch_boot_mode(rtwdev); + if (on) { cfg_seq = chip->pwr_on_seq; cfg_func = chip->ops->pwr_on_func; diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index c0aac4d3678a..ef1162595042 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -172,6 +172,7 @@
#define R_AX_SYS_STATUS1 0x00F4 #define B_AX_SEL_0XC0_MASK GENMASK(17, 16) +#define B_AX_AUTO_WLPON BIT(10) #define B_AX_PAD_HCI_SEL_V2_MASK GENMASK(5, 3) #define MAC_AX_HCI_SEL_SDIO_UART 0 #define MAC_AX_HCI_SEL_MULTI_USB 1 diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 0115f8f5b724..5b59c0ee6c66 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -637,8 +637,6 @@ static int xennet_xdp_xmit_one(struct net_device *dev, tx_stats->packets++; u64_stats_update_end(&tx_stats->syncp);
- xennet_tx_buf_gc(queue); - return 0; }
@@ -848,9 +846,6 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev tx_stats->packets++; u64_stats_update_end(&tx_stats->syncp);
- /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */ - xennet_tx_buf_gc(queue); - if (!netfront_tx_slot_available(queue)) netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 97ab91a479d1..136dba6221d8 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1755,8 +1755,28 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev) * might be pointing at! */ result = nvme_disable_ctrl(&dev->ctrl, false); - if (result < 0) - return result; + if (result < 0) { + struct pci_dev *pdev = to_pci_dev(dev->dev); + + /* + * The NVMe Controller Reset method did not get an expected + * CSTS.RDY transition, so something with the device appears to + * be stuck. Use the lower level and bigger hammer PCIe + * Function Level Reset to attempt restoring the device to its + * initial state, and try again. + */ + result = pcie_reset_flr(pdev, false); + if (result < 0) + return result; + + pci_restore_state(pdev); + result = nvme_disable_ctrl(&dev->ctrl, false); + if (result < 0) + return result; + + dev_info(dev->ctrl.device, + "controller reset completed after pcie flr\n"); + }
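The fallback added above reaches for a PCIe Function Level Reset when the controller will not complete a normal disable; since an FLR wipes the function's config space, the state has to be restored afterwards (the config space is assumed to have been saved earlier, which is why the hunk can call pci_restore_state() directly). A generic, hedged sketch of the save/reset/restore sequence:

	#include <linux/pci.h>

	/* Sketch: last-resort FLR that preserves config space around the reset. */
	static int flr_and_restore(struct pci_dev *pdev)
	{
		int ret;

		pci_save_state(pdev);			/* snapshot config space */

		ret = pcie_reset_flr(pdev, false);	/* false: do the reset, not a probe */
		if (ret)
			return ret;

		pci_restore_state(pdev);		/* reprogram BARs, MSI, etc. */
		return 0;
	}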
result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); if (result) diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index cedfbd425863..23b8dda70cb2 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -1043,7 +1043,10 @@ static const struct pci_epc_features imx8m_pcie_epc_features = { .linkup_notifier = false, .msi_capable = true, .msix_capable = false, - .reserved_bar = 1 << BAR_1 | 1 << BAR_3, + .reserved_bar = 1 << BAR_1 | 1 << BAR_3 | 1 << BAR_5, + .bar_fixed_size = { + [BAR_4] = SZ_256, + }, .align = SZ_64K, };
@@ -1098,8 +1101,6 @@ static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie, dev_err(dev, "failed to initialize endpoint\n"); return ret; } - /* Start LTSSM. */ - imx6_pcie_ltssm_enable(dev);
return 0; } diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c index 6ff20d585396..c25f32abbdf6 100644 --- a/drivers/pci/controller/pcie-rockchip-host.c +++ b/drivers/pci/controller/pcie-rockchip-host.c @@ -11,6 +11,7 @@ * ARM PCI Host generic driver. */
+#include <linux/bitfield.h> #include <linux/bitrev.h> #include <linux/clk.h> #include <linux/delay.h> @@ -40,18 +41,18 @@ static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip) { u32 status;
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE); - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); }
static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip) { u32 status;
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); }
static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip) @@ -269,7 +270,7 @@ static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip) scale = 3; /* 0.001x */ curr = curr / 1000; /* convert to mA */ power = (curr * 3300) / 1000; /* milliwatt */ - while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) { + while (power > FIELD_MAX(PCI_EXP_DEVCAP_PWR_VAL)) { if (!scale) { dev_warn(rockchip->dev, "invalid power supply\n"); return; @@ -278,10 +279,10 @@ static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip) power = power / 10; }
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR); - status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) | - (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT); - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR); + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCAP); + status |= FIELD_PREP(PCI_EXP_DEVCAP_PWR_VAL, power); + status |= FIELD_PREP(PCI_EXP_DEVCAP_PWR_SCL, scale); + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCAP); }
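The rewrite above drops the driver-private DCR shift/limit macros in favour of the generic PCI_EXP_DEVCAP power-limit fields plus the <linux/bitfield.h> helpers: FIELD_MAX() yields the largest value a mask can hold and FIELD_PREP() shifts a value into the mask's bit positions. A small, self-contained example; the 2.5 W figure is arbitrary:

	#include <linux/bitfield.h>
	#include <linux/pci.h>

	/* Encode a 2.5 W captured slot power limit: value 25, scale code 1 (0.1x). */
	static u32 encode_power_limit_example(void)
	{
		u32 val = 25, scale = 1;
		u32 devcap = 0;

		/* FIELD_MAX(PCI_EXP_DEVCAP_PWR_VAL) is 255, so 25 fits */
		devcap |= FIELD_PREP(PCI_EXP_DEVCAP_PWR_VAL, val);
		devcap |= FIELD_PREP(PCI_EXP_DEVCAP_PWR_SCL, scale);

		return devcap;
	}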
/** @@ -309,14 +310,14 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip) rockchip_pcie_set_power_limit(rockchip);
/* Set RC's clock architecture as common clock */ - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); status |= PCI_EXP_LNKSTA_SLC << 16; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
/* Set RC's RCB to 128 */ - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); status |= PCI_EXP_LNKCTL_RCB; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
/* Enable Gen1 training */ rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE, @@ -338,9 +339,13 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip) * Enable retrain for gen2. This should be configured only after * gen1 finished. */ - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL2); + status &= ~PCI_EXP_LNKCTL2_TLS; + status |= PCI_EXP_LNKCTL2_TLS_5_0GT; + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL2); + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); status |= PCI_EXP_LNKCTL_RL; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
err = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL, status, PCIE_LINK_IS_GEN2(status), 20, @@ -377,15 +382,15 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
/* Clear L0s from RC's link cap */ if (of_property_read_bool(dev->of_node, "aspm-no-l0s")) { - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LINK_CAP); - status &= ~PCIE_RC_CONFIG_LINK_CAP_L0S; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP); + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCAP); + status &= ~PCI_EXP_LNKCAP_ASPM_L0S; + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCAP); }
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR); - status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK; - status |= PCIE_RC_CONFIG_DCSR_MPS_256; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR); + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCTL); + status &= ~PCI_EXP_DEVCTL_PAYLOAD; + status |= PCI_EXP_DEVCTL_PAYLOAD_256B; + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCTL);
return 0; err_power_off_phy: diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h index 15ee949f2485..049ad984a416 100644 --- a/drivers/pci/controller/pcie-rockchip.h +++ b/drivers/pci/controller/pcie-rockchip.h @@ -144,16 +144,7 @@ #define PCIE_EP_CONFIG_BASE 0xa00000 #define PCIE_EP_CONFIG_DID_VID (PCIE_EP_CONFIG_BASE + 0x00) #define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08) -#define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4) -#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18 -#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff -#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26 -#define PCIE_RC_CONFIG_DCSR (PCIE_RC_CONFIG_BASE + 0xc8) -#define PCIE_RC_CONFIG_DCSR_MPS_MASK GENMASK(7, 5) -#define PCIE_RC_CONFIG_DCSR_MPS_256 (0x1 << 5) -#define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc) -#define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10) -#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0) +#define PCIE_RC_CONFIG_CR (PCIE_RC_CONFIG_BASE + 0xc0) #define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c) #define PCIE_RC_CONFIG_THP_CAP (PCIE_RC_CONFIG_BASE + 0x274) #define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK GENMASK(31, 20) diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c index 0ea64e24ed61..c17dff4bd19b 100644 --- a/drivers/pci/endpoint/pci-ep-cfs.c +++ b/drivers/pci/endpoint/pci-ep-cfs.c @@ -683,6 +683,7 @@ void pci_ep_cfs_remove_epf_group(struct config_group *group) if (IS_ERR_OR_NULL(group)) return;
+ list_del(&group->group_entry); configfs_unregister_default_group(group); } EXPORT_SYMBOL(pci_ep_cfs_remove_epf_group); diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c index 059f8639f21e..03673ade6ce4 100644 --- a/drivers/pci/endpoint/pci-epf-core.c +++ b/drivers/pci/endpoint/pci-epf-core.c @@ -311,7 +311,7 @@ static void pci_epf_remove_cfs(struct pci_epf_driver *driver) mutex_lock(&pci_epf_mutex); list_for_each_entry_safe(group, tmp, &driver->epf_group, group_entry) pci_ep_cfs_remove_epf_group(group); - list_del(&driver->epf_group); + WARN_ON(!list_empty(&driver->epf_group)); mutex_unlock(&pci_epf_mutex); }
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 05b7357bd258..61bded8623d2 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -793,13 +793,11 @@ int pci_acpi_program_hp_params(struct pci_dev *dev) bool pciehp_is_native(struct pci_dev *bridge) { const struct pci_host_bridge *host; - u32 slot_cap;
if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)) return false;
- pcie_capability_read_dword(bridge, PCI_EXP_SLTCAP, &slot_cap); - if (!(slot_cap & PCI_EXP_SLTCAP_HPC)) + if (!bridge->is_pciehp) return false;
if (pcie_ports_native) diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 4541dfbf0e1b..df7f7e2ed006 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -2475,7 +2475,7 @@ static void pci_pme_list_scan(struct work_struct *work) * course of the call. */ if (bdev) { - bref = pm_runtime_get_if_active(bdev, true); + bref = pm_runtime_get_if_active(bdev); if (!bref) continue;
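The pci_pme_list_scan() hunk just above follows the current pm_runtime_get_if_active() prototype, which no longer takes the ign_usage_count argument: it takes a usage-count reference only when the device is already RPM_ACTIVE, returning 1 on success, 0 if the device is not active, and a negative value if runtime PM is disabled for the device. A hedged usage sketch:

	#include <linux/pm_runtime.h>

	/* Sketch: touch the hardware only if it is already powered up. */
	static void poll_if_active(struct device *dev)
	{
		if (pm_runtime_get_if_active(dev) <= 0)
			return;		/* suspended, or runtime PM disabled */

		/* ... safe to access registers here ... */

		pm_runtime_put(dev);	/* drop the reference taken above */
	}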
@@ -3065,8 +3065,12 @@ static const struct dmi_system_id bridge_d3_blacklist[] = { * pci_bridge_d3_possible - Is it possible to put the bridge into D3 * @bridge: Bridge to check * - * This function checks if it is possible to move the bridge to D3. - * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt. + * Currently we only allow D3 for some PCIe ports and for Thunderbolt. + * + * Return: Whether it is possible to move the bridge to D3. + * + * The return value is guaranteed to be constant across the entire lifetime + * of the bridge, including its hot-removal. */ bool pci_bridge_d3_possible(struct pci_dev *bridge) { diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index b7cec139d816..5557290b63dc 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1594,7 +1594,7 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev)
pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, ®32); if (reg32 & PCI_EXP_SLTCAP_HPC) - pdev->is_hotplug_bridge = 1; + pdev->is_hotplug_bridge = pdev->is_pciehp = 1; }
static void set_pcie_thunderbolt(struct pci_dev *dev) diff --git a/drivers/perf/cxl_pmu.c b/drivers/perf/cxl_pmu.c index 308c9969642e..c03df0f52889 100644 --- a/drivers/perf/cxl_pmu.c +++ b/drivers/perf/cxl_pmu.c @@ -881,7 +881,7 @@ static int cxl_pmu_probe(struct device *dev) return rc; irq = rc;
- irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_overflow\n", dev_name); + irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_overflow", dev_name); if (!irq_name) return -ENOMEM;
diff --git a/drivers/phy/qualcomm/phy-qcom-m31.c b/drivers/phy/qualcomm/phy-qcom-m31.c index 89c9d74e3546..a5e60039a264 100644 --- a/drivers/phy/qualcomm/phy-qcom-m31.c +++ b/drivers/phy/qualcomm/phy-qcom-m31.c @@ -58,14 +58,16 @@ #define USB2_0_TX_ENABLE BIT(2)
#define USB2PHY_USB_PHY_M31_XCFGI_4 0xc8 - #define HSTX_SLEW_RATE_565PS GENMASK(1, 0) + #define HSTX_SLEW_RATE_400PS GENMASK(2, 0) #define PLL_CHARGING_PUMP_CURRENT_35UA GENMASK(4, 3) #define ODT_VALUE_38_02_OHM GENMASK(7, 6)
#define USB2PHY_USB_PHY_M31_XCFGI_5 0xcc - #define ODT_VALUE_45_02_OHM BIT(2) #define HSTX_PRE_EMPHASIS_LEVEL_0_55MA BIT(0)
+#define USB2PHY_USB_PHY_M31_XCFGI_9 0xdc + #define HSTX_CURRENT_17_1MA_385MV BIT(1) + #define USB2PHY_USB_PHY_M31_XCFGI_11 0xe4 #define XCFG_COARSE_TUNE_NUM BIT(1) #define XCFG_FINE_TUNE_NUM BIT(3) @@ -120,7 +122,7 @@ static struct m31_phy_regs m31_ipq5332_regs[] = { }, { USB2PHY_USB_PHY_M31_XCFGI_4, - HSTX_SLEW_RATE_565PS | PLL_CHARGING_PUMP_CURRENT_35UA | ODT_VALUE_38_02_OHM, + HSTX_SLEW_RATE_400PS | PLL_CHARGING_PUMP_CURRENT_35UA | ODT_VALUE_38_02_OHM, 0 }, { @@ -130,9 +132,13 @@ static struct m31_phy_regs m31_ipq5332_regs[] = { }, { USB2PHY_USB_PHY_M31_XCFGI_5, - ODT_VALUE_45_02_OHM | HSTX_PRE_EMPHASIS_LEVEL_0_55MA, + HSTX_PRE_EMPHASIS_LEVEL_0_55MA, 4 }, + { + USB2PHY_USB_PHY_M31_XCFGI_9, + HSTX_CURRENT_17_1MA_385MV, + }, { USB_PHY_UTMI_CTRL5, 0x0, diff --git a/drivers/phy/rockchip/phy-rockchip-pcie.c b/drivers/phy/rockchip/phy-rockchip-pcie.c index 8234b83fdd88..cbf3c140a138 100644 --- a/drivers/phy/rockchip/phy-rockchip-pcie.c +++ b/drivers/phy/rockchip/phy-rockchip-pcie.c @@ -31,9 +31,8 @@ #define PHY_CFG_ADDR_SHIFT 1 #define PHY_CFG_DATA_MASK 0xf #define PHY_CFG_ADDR_MASK 0x3f -#define PHY_CFG_RD_MASK 0x3ff #define PHY_CFG_WR_ENABLE 1 -#define PHY_CFG_WR_DISABLE 1 +#define PHY_CFG_WR_DISABLE 0 #define PHY_CFG_WR_SHIFT 0 #define PHY_CFG_WR_MASK 1 #define PHY_CFG_PLL_LOCK 0x10 diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c index 84121b125d90..67c2791ee246 100644 --- a/drivers/pinctrl/stm32/pinctrl-stm32.c +++ b/drivers/pinctrl/stm32/pinctrl-stm32.c @@ -418,6 +418,7 @@ static struct irq_chip stm32_gpio_irq_chip = { .irq_set_wake = irq_chip_set_wake_parent, .irq_request_resources = stm32_gpio_irq_request_resources, .irq_release_resources = stm32_gpio_irq_release_resources, + .irq_set_affinity = IS_ENABLED(CONFIG_SMP) ? irq_chip_set_affinity_parent : NULL, };
static int stm32_gpio_domain_translate(struct irq_domain *d, diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c index 47d19f7e295a..e82f433e542e 100644 --- a/drivers/platform/chrome/cros_ec.c +++ b/drivers/platform/chrome/cros_ec.c @@ -313,6 +313,9 @@ EXPORT_SYMBOL(cros_ec_register); */ void cros_ec_unregister(struct cros_ec_device *ec_dev) { + if (ec_dev->mkbp_event_supported) + blocking_notifier_chain_unregister(&ec_dev->event_notifier, + &ec_dev->notifier_ready); platform_device_unregister(ec_dev->pd); platform_device_unregister(ec_dev->ec); mutex_destroy(&ec_dev->lock); diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c index 66fdc6fa73ec..76807ceb313a 100644 --- a/drivers/platform/chrome/cros_ec_typec.c +++ b/drivers/platform/chrome/cros_ec_typec.c @@ -1179,8 +1179,8 @@ static int cros_typec_probe(struct platform_device *pdev)
typec->ec = dev_get_drvdata(pdev->dev.parent); if (!typec->ec) { - dev_err(dev, "couldn't find parent EC device\n"); - return -ENODEV; + dev_warn(dev, "couldn't find parent EC device\n"); + return -EPROBE_DEFER; }
platform_set_drvdata(pdev, typec); diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c index 7ed12c1d3b34..04686ae1e976 100644 --- a/drivers/platform/x86/amd/pmc/pmc-quirks.c +++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c @@ -189,6 +189,15 @@ static const struct dmi_system_id fwbug_list[] = { DMI_MATCH(DMI_PRODUCT_NAME, "82XQ"), } }, + /* https://gitlab.freedesktop.org/drm/amd/-/issues/4434 */ + { + .ident = "Lenovo Yoga 6 13ALC6", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82ND"), + } + }, /* https://gitlab.freedesktop.org/drm/amd/-/issues/2684 */ { .ident = "HP Laptop 15s-eq2xxx", diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 8de0d3232e48..88364a5502e6 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -537,12 +537,12 @@ static unsigned long __init tpacpi_check_quirks( return 0; }
-static inline bool __pure __init tpacpi_is_lenovo(void) +static __always_inline bool __pure __init tpacpi_is_lenovo(void) { return thinkpad_id.vendor == PCI_VENDOR_ID_LENOVO; }
-static inline bool __pure __init tpacpi_is_ibm(void) +static __always_inline bool __pure __init tpacpi_is_ibm(void) { return thinkpad_id.vendor == PCI_VENDOR_ID_IBM; } diff --git a/drivers/pmdomain/imx/imx8m-blk-ctrl.c b/drivers/pmdomain/imx/imx8m-blk-ctrl.c index cc5ef6e2f0a8..0dfaf1d14035 100644 --- a/drivers/pmdomain/imx/imx8m-blk-ctrl.c +++ b/drivers/pmdomain/imx/imx8m-blk-ctrl.c @@ -664,6 +664,11 @@ static const struct imx8m_blk_ctrl_data imx8mn_disp_blk_ctl_dev_data = { #define LCDIF_1_RD_HURRY GENMASK(15, 13) #define LCDIF_0_RD_HURRY GENMASK(12, 10)
+#define ISI_CACHE_CTRL 0x50 +#define ISI_V_WR_HURRY GENMASK(28, 26) +#define ISI_U_WR_HURRY GENMASK(25, 23) +#define ISI_Y_WR_HURRY GENMASK(22, 20) + static int imx8mp_media_power_notifier(struct notifier_block *nb, unsigned long action, void *data) { @@ -693,6 +698,11 @@ static int imx8mp_media_power_notifier(struct notifier_block *nb, regmap_set_bits(bc->regmap, LCDIF_ARCACHE_CTRL, FIELD_PREP(LCDIF_1_RD_HURRY, 7) | FIELD_PREP(LCDIF_0_RD_HURRY, 7)); + /* Same here for ISI */ + regmap_set_bits(bc->regmap, ISI_CACHE_CTRL, + FIELD_PREP(ISI_V_WR_HURRY, 7) | + FIELD_PREP(ISI_U_WR_HURRY, 7) | + FIELD_PREP(ISI_Y_WR_HURRY, 7)); }
return NOTIFY_OK; diff --git a/drivers/power/supply/qcom_battmgr.c b/drivers/power/supply/qcom_battmgr.c index 5b3681b9100c..190e8a4cfa97 100644 --- a/drivers/power/supply/qcom_battmgr.c +++ b/drivers/power/supply/qcom_battmgr.c @@ -977,6 +977,8 @@ static unsigned int qcom_battmgr_sc8280xp_parse_technology(const char *chemistry { if (!strncmp(chemistry, "LIO", BATTMGR_CHEMISTRY_LEN)) return POWER_SUPPLY_TECHNOLOGY_LION; + if (!strncmp(chemistry, "LIP", BATTMGR_CHEMISTRY_LEN)) + return POWER_SUPPLY_TECHNOLOGY_LIPO;
pr_err("Unknown battery technology '%s'\n", chemistry); return POWER_SUPPLY_TECHNOLOGY_UNKNOWN; diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c index bf3b6f1aa984..41e1fdbcda16 100644 --- a/drivers/pps/clients/pps-gpio.c +++ b/drivers/pps/clients/pps-gpio.c @@ -206,8 +206,8 @@ static int pps_gpio_probe(struct platform_device *pdev) }
/* register IRQ interrupt handler */ - ret = devm_request_irq(dev, data->irq, pps_gpio_irq_handler, - get_irqf_trigger_flags(data), data->info.name, data); + ret = request_irq(data->irq, pps_gpio_irq_handler, + get_irqf_trigger_flags(data), data->info.name, data); if (ret) { pps_unregister_source(data->pps); dev_err(dev, "failed to acquire IRQ %d\n", data->irq); @@ -224,6 +224,7 @@ static int pps_gpio_remove(struct platform_device *pdev) { struct pps_gpio_device_data *data = platform_get_drvdata(pdev);
+ free_irq(data->irq, data); pps_unregister_source(data->pps); del_timer_sync(&data->echo_timer); /* reset echo pin in any case */ diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index b7fc260ed43b..0682bb340221 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -79,7 +79,7 @@ static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
if (ptp_clock_freerun(ptp)) { - pr_err("ptp: physical clock is free running\n"); + pr_err_ratelimited("ptp: physical clock is free running\n"); return -EBUSY; }
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h index a54124269c2f..3fbd1d68a9bc 100644 --- a/drivers/ptp/ptp_private.h +++ b/drivers/ptp/ptp_private.h @@ -20,6 +20,11 @@ #define PTP_BUF_TIMESTAMPS 30 #define PTP_DEFAULT_MAX_VCLOCKS 20
+enum { + PTP_LOCK_PHYSICAL = 0, + PTP_LOCK_VIRTUAL, +}; + struct timestamp_event_queue { struct ptp_extts_event buf[PTP_MAX_TIMESTAMPS]; int head; diff --git a/drivers/ptp/ptp_vclock.c b/drivers/ptp/ptp_vclock.c index dcf752c9e045..7d08ff3b30fc 100644 --- a/drivers/ptp/ptp_vclock.c +++ b/drivers/ptp/ptp_vclock.c @@ -154,6 +154,11 @@ static long ptp_vclock_refresh(struct ptp_clock_info *ptp) return PTP_VCLOCK_REFRESH_INTERVAL; }
+static void ptp_vclock_set_subclass(struct ptp_clock *ptp) +{ + lockdep_set_subclass(&ptp->clock.rwsem, PTP_LOCK_VIRTUAL); +} + static const struct ptp_clock_info ptp_vclock_info = { .owner = THIS_MODULE, .name = "ptp virtual clock", @@ -213,6 +218,8 @@ struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock) return NULL; }
+ ptp_vclock_set_subclass(vclock->clock); + timecounter_init(&vclock->tc, &vclock->cc, 0); ptp_schedule_worker(vclock->clock, PTP_VCLOCK_REFRESH_INTERVAL);
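The lockdep subclass set above matters because a virtual clock operation can run while the physical clock's posix-clock rwsem, which belongs to the same lock class, is already held; giving the vclock's rwsem a different subclass tells lockdep that this nesting is intentional rather than a deadlock. A generic illustration with invented names:

	#include <linux/lockdep.h>
	#include <linux/rwsem.h>

	struct clk_obj {
		struct rw_semaphore rwsem;
	};

	static void init_nested_pair(struct clk_obj *parent, struct clk_obj *child)
	{
		init_rwsem(&parent->rwsem);		/* default subclass 0 */
		init_rwsem(&child->rwsem);
		lockdep_set_subclass(&child->rwsem, 1);	/* cf. PTP_LOCK_VIRTUAL */
	}

	static void touch_both(struct clk_obj *parent, struct clk_obj *child)
	{
		down_write(&parent->rwsem);
		down_write(&child->rwsem);	/* no false "recursive lock" report */
		up_write(&child->rwsem);
		up_write(&parent->rwsem);
	}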
diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c index 2fc6163eace3..6591f8f84ce8 100644 --- a/drivers/pwm/pwm-imx-tpm.c +++ b/drivers/pwm/pwm-imx-tpm.c @@ -204,6 +204,15 @@ static int pwm_imx_tpm_apply_hw(struct pwm_chip *chip, val |= FIELD_PREP(PWM_IMX_TPM_SC_PS, p->prescale); writel(val, tpm->base + PWM_IMX_TPM_SC);
+ /* + * if the counter is disabled (CMOD == 0), programming the new + * period length (MOD) will not reset the counter (CNT). If + * CNT.COUNT happens to be bigger than the new MOD value then + * the counter will end up being reset way too late. Therefore, + * manually reset it to 0. + */ + if (!cmod) + writel(0x0, tpm->base + PWM_IMX_TPM_CNT); /* * set period count: * if the PWM is disabled (CMOD[1:0] = 2b00), then MOD register diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c index ff7c70a0033d..34413607b86f 100644 --- a/drivers/pwm/pwm-mediatek.c +++ b/drivers/pwm/pwm-mediatek.c @@ -118,6 +118,26 @@ static inline void pwm_mediatek_writel(struct pwm_mediatek_chip *chip, writel(value, chip->regs + chip->soc->reg_offset[num] + offset); }
+static void pwm_mediatek_enable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct pwm_mediatek_chip *pc = to_pwm_mediatek_chip(chip); + u32 value; + + value = readl(pc->regs); + value |= BIT(pwm->hwpwm); + writel(value, pc->regs); +} + +static void pwm_mediatek_disable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct pwm_mediatek_chip *pc = to_pwm_mediatek_chip(chip); + u32 value; + + value = readl(pc->regs); + value &= ~BIT(pwm->hwpwm); + writel(value, pc->regs); +} + static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm, int duty_ns, int period_ns) { @@ -147,7 +167,10 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm, do_div(resolution, clk_rate);
cnt_period = DIV_ROUND_CLOSEST_ULL((u64)period_ns * 1000, resolution); - while (cnt_period > 8191) { + if (!cnt_period) + return -EINVAL; + + while (cnt_period > 8192) { resolution *= 2; clkdiv++; cnt_period = DIV_ROUND_CLOSEST_ULL((u64)period_ns * 1000, @@ -170,9 +193,16 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm, }
cnt_duty = DIV_ROUND_CLOSEST_ULL((u64)duty_ns * 1000, resolution); + pwm_mediatek_writel(pc, pwm->hwpwm, PWMCON, BIT(15) | clkdiv); - pwm_mediatek_writel(pc, pwm->hwpwm, reg_width, cnt_period); - pwm_mediatek_writel(pc, pwm->hwpwm, reg_thres, cnt_duty); + pwm_mediatek_writel(pc, pwm->hwpwm, reg_width, cnt_period - 1); + + if (cnt_duty) { + pwm_mediatek_writel(pc, pwm->hwpwm, reg_thres, cnt_duty - 1); + pwm_mediatek_enable(chip, pwm); + } else { + pwm_mediatek_disable(chip, pwm); + }
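To make the revised period math above concrete, assume (purely for illustration) a 26 MHz PWM source clock and a requested 1 ms period with a 0.5 ms duty cycle: the base resolution is 10^12 / 26,000,000 ≈ 38,461.5 ps per tick, so the first pass gives cnt_period = 26,000 ticks; that exceeds 8192, so the resolution is doubled twice (clkdiv = 2), yielding cnt_period = 6500 and cnt_duty = 3250, and because the hardware effectively counts one more cycle than the programmed value, the width and threshold registers are written as 6499 and 3249. A zero duty cycle now skips the threshold write entirely and simply disables the channel.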
out: pwm_mediatek_clk_disable(chip, pwm); @@ -180,35 +210,6 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm, return ret; }
-static int pwm_mediatek_enable(struct pwm_chip *chip, struct pwm_device *pwm) -{ - struct pwm_mediatek_chip *pc = to_pwm_mediatek_chip(chip); - u32 value; - int ret; - - ret = pwm_mediatek_clk_enable(chip, pwm); - if (ret < 0) - return ret; - - value = readl(pc->regs); - value |= BIT(pwm->hwpwm); - writel(value, pc->regs); - - return 0; -} - -static void pwm_mediatek_disable(struct pwm_chip *chip, struct pwm_device *pwm) -{ - struct pwm_mediatek_chip *pc = to_pwm_mediatek_chip(chip); - u32 value; - - value = readl(pc->regs); - value &= ~BIT(pwm->hwpwm); - writel(value, pc->regs); - - pwm_mediatek_clk_disable(chip, pwm); -} - static int pwm_mediatek_apply(struct pwm_chip *chip, struct pwm_device *pwm, const struct pwm_state *state) { @@ -218,8 +219,10 @@ static int pwm_mediatek_apply(struct pwm_chip *chip, struct pwm_device *pwm, return -EINVAL;
if (!state->enabled) { - if (pwm->state.enabled) + if (pwm->state.enabled) { pwm_mediatek_disable(chip, pwm); + pwm_mediatek_clk_disable(chip, pwm); + }
return 0; } @@ -229,7 +232,7 @@ static int pwm_mediatek_apply(struct pwm_chip *chip, struct pwm_device *pwm, return err;
if (!pwm->state.enabled) - err = pwm_mediatek_enable(chip, pwm); + err = pwm_mediatek_clk_enable(chip, pwm);
return err; } diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c index 610a69928dff..251f9840d85b 100644 --- a/drivers/remoteproc/imx_rproc.c +++ b/drivers/remoteproc/imx_rproc.c @@ -1088,8 +1088,8 @@ static int imx_rproc_clk_enable(struct imx_rproc *priv) struct device *dev = priv->dev; int ret;
- /* Remote core is not under control of Linux */ - if (dcfg->method == IMX_RPROC_NONE) + /* Remote core is not under control of Linux or it is managed by SCU API */ + if (dcfg->method == IMX_RPROC_NONE || dcfg->method == IMX_RPROC_SCU_API) return 0;
priv->clk = devm_clk_get(dev, NULL); diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index ccd59ddd7610..9f25eb3aec25 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig @@ -51,8 +51,8 @@ config RESET_BERLIN
config RESET_BRCMSTB tristate "Broadcom STB reset controller" - depends on ARCH_BRCMSTB || COMPILE_TEST - default ARCH_BRCMSTB + depends on ARCH_BRCMSTB || ARCH_BCM2835 || COMPILE_TEST + default ARCH_BRCMSTB || ARCH_BCM2835 help This enables the reset controller driver for Broadcom STB SoCs using a SUN_TOP_CTRL_SW_INIT style controller. @@ -60,11 +60,11 @@ config RESET_BRCMSTB config RESET_BRCMSTB_RESCAL tristate "Broadcom STB RESCAL reset controller" depends on HAS_IOMEM - depends on ARCH_BRCMSTB || COMPILE_TEST - default ARCH_BRCMSTB + depends on ARCH_BRCMSTB || ARCH_BCM2835 || COMPILE_TEST + default ARCH_BRCMSTB || ARCH_BCM2835 help This enables the RESCAL reset controller for SATA, PCIe0, or PCIe1 on - BCM7216. + BCM7216 or the BCM2712.
config RESET_HSDK bool "Synopsys HSDK Reset Driver" diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index e14981383c01..74aad2b12460 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c @@ -274,6 +274,13 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t) if (tmp & DS1340_BIT_OSF) return -EINVAL; break; + case ds_1341: + ret = regmap_read(ds1307->regmap, DS1337_REG_STATUS, &tmp); + if (ret) + return ret; + if (tmp & DS1337_BIT_OSF) + return -EINVAL; + break; case ds_1388: ret = regmap_read(ds1307->regmap, DS1388_REG_FLAG, &tmp); if (ret) @@ -372,6 +379,10 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t) regmap_update_bits(ds1307->regmap, DS1340_REG_FLAG, DS1340_BIT_OSF, 0); break; + case ds_1341: + regmap_update_bits(ds1307->regmap, DS1337_REG_STATUS, + DS1337_BIT_OSF, 0); + break; case ds_1388: regmap_update_bits(ds1307->regmap, DS1388_REG_FLAG, DS1388_BIT_OSF, 0); @@ -1808,10 +1819,8 @@ static int ds1307_probe(struct i2c_client *client) regmap_write(ds1307->regmap, DS1337_REG_CONTROL, regs[0]);
- /* oscillator fault? clear flag, and warn */ + /* oscillator fault? warn */ if (regs[1] & DS1337_BIT_OSF) { - regmap_write(ds1307->regmap, DS1337_REG_STATUS, - regs[1] & ~DS1337_BIT_OSF); dev_warn(ds1307->dev, "SET TIME!\n"); } break; diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index 6fa0fb35e521..3409822dfbb4 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c @@ -76,6 +76,13 @@ unsigned long sclp_console_full; /* The currently active SCLP command word. */ static sclp_cmdw_t active_cmd;
+static inline struct sccb_header *sclpint_to_sccb(u32 sccb_int) +{ + if (sccb_int) + return __va(sccb_int); + return NULL; +} + static inline void sclp_trace(int prio, char *id, u32 a, u64 b, bool err) { struct sclp_trace_entry e; @@ -620,7 +627,7 @@ __sclp_find_req(u32 sccb)
static bool ok_response(u32 sccb_int, sclp_cmdw_t cmd) { - struct sccb_header *sccb = (struct sccb_header *)__va(sccb_int); + struct sccb_header *sccb = sclpint_to_sccb(sccb_int); struct evbuf_header *evbuf; u16 response;
@@ -659,7 +666,7 @@ static void sclp_interrupt_handler(struct ext_code ext_code,
/* INT: Interrupt received (a=intparm, b=cmd) */ sclp_trace_sccb(0, "INT", param32, active_cmd, active_cmd, - (struct sccb_header *)__va(finished_sccb), + sclpint_to_sccb(finished_sccb), !ok_response(finished_sccb, active_cmd));
if (finished_sccb) { diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 0f64b0244303..31b95e6c96c5 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -481,8 +481,7 @@ void aac_define_int_mode(struct aac_dev *dev) pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) { min_msix = 2; i = pci_alloc_irq_vectors(dev->pdev, - min_msix, msi_count, - PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); + min_msix, msi_count, PCI_IRQ_MSIX); if (i > 0) { dev->msi_enabled = 1; msi_count = i; diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index a9d3d8562d3c..0ec76d1cb6fb 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c @@ -706,6 +706,7 @@ bfad_im_probe(struct bfad_s *bfad)
if (bfad_thread_workq(bfad) != BFA_STATUS_OK) { kfree(im); + bfad->im = NULL; return BFA_STATUS_FAILED; }
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 0fda8905eabd..916c07648460 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -3184,7 +3184,8 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size, return NULL; conn = cls_conn->dd_data;
- conn->dd_data = cls_conn->dd_data + sizeof(*conn); + if (dd_size) + conn->dd_data = cls_conn->dd_data + sizeof(*conn); conn->session = session; conn->cls_conn = cls_conn; conn->c_stage = ISCSI_CONN_INITIAL_STAGE; diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 20662b4f339e..6b6c964e8076 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -6291,7 +6291,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) } phba->nvmeio_trc_on = 1; phba->nvmeio_trc_output_idx = 0; - phba->nvmeio_trc = NULL; } else { nvmeio_off: phba->nvmeio_trc_size = 0; diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 070654cc9292..dcbb2432c978 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -390,6 +390,10 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport) if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) return;
+ /* may be called before queues established if hba_setup fails */ + if (!phba->sli4_hba.hdwq) + return; + spin_lock_irqsave(&phba->hbalock, iflag); for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { qp = &phba->sli4_hba.hdwq[idx]; diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h index ae98d15c30b1..90da783457fb 100644 --- a/drivers/scsi/mpi3mr/mpi3mr.h +++ b/drivers/scsi/mpi3mr/mpi3mr.h @@ -1025,6 +1025,8 @@ struct scmd_priv { * @logdata_buf: Circular buffer to store log data entries * @logdata_buf_idx: Index of entry in buffer to store * @logdata_entry_sz: log data entry size + * @adm_req_q_bar_writeq_lock: Admin request queue lock + * @adm_reply_q_bar_writeq_lock: Admin reply queue lock * @pend_large_data_sz: Counter to track pending large data * @io_throttle_data_length: I/O size to track in 512b blocks * @io_throttle_high: I/O size to start throttle in 512b blocks @@ -1055,7 +1057,7 @@ struct mpi3mr_ioc { char name[MPI3MR_NAME_LENGTH]; char driver_name[MPI3MR_NAME_LENGTH];
- volatile struct mpi3_sysif_registers __iomem *sysif_regs; + struct mpi3_sysif_registers __iomem *sysif_regs; resource_size_t sysif_regs_phys; int bars; u64 dma_mask; @@ -1207,6 +1209,8 @@ struct mpi3mr_ioc { u8 *logdata_buf; u16 logdata_buf_idx; u16 logdata_entry_sz; + spinlock_t adm_req_q_bar_writeq_lock; + spinlock_t adm_reply_q_bar_writeq_lock;
atomic_t pend_large_data_sz; u32 io_throttle_data_length; diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c index 60714a6c2637..b03e4b8cb67d 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_fw.c +++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c @@ -23,17 +23,22 @@ module_param(poll_queues, int, 0444); MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");
#if defined(writeq) && defined(CONFIG_64BIT) -static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr) +static inline void mpi3mr_writeq(__u64 b, void __iomem *addr, + spinlock_t *write_queue_lock) { writeq(b, addr); } #else -static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr) +static inline void mpi3mr_writeq(__u64 b, void __iomem *addr, + spinlock_t *write_queue_lock) { __u64 data_out = b; + unsigned long flags;
+ spin_lock_irqsave(write_queue_lock, flags); writel((u32)(data_out), addr); writel((u32)(data_out >> 32), (addr + 4)); + spin_unlock_irqrestore(write_queue_lock, flags); } #endif
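A note on the mpi3mr_writeq() hunk above: when writeq() is not available (32-bit builds), the 64-bit queue-address write is issued as two 32-bit writel() calls, and the new per-queue spinlock keeps concurrent writers from interleaving their halves. A minimal stand-alone sketch of the same pattern (illustrative only, not the driver's code; the helper name is made up):

	static inline void split_writeq(u64 val, void __iomem *addr,
					spinlock_t *lock)
	{
		unsigned long flags;

		/* Keep both halves of the 64-bit value together. */
		spin_lock_irqsave(lock, flags);
		writel(lower_32_bits(val), addr);	/* low word first */
		writel(upper_32_bits(val), addr + 4);	/* then the high word */
		spin_unlock_irqrestore(lock, flags);
	}

Without the lock, two CPUs programming different 64-bit values could leave the device with the low half of one and the high half of the other.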
@@ -411,8 +416,8 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc, MPI3MR_SENSE_BUF_SZ); } if (cmdptr->is_waiting) { - complete(&cmdptr->done); cmdptr->is_waiting = 0; + complete(&cmdptr->done); } else if (cmdptr->callback) cmdptr->callback(mrioc, cmdptr); } @@ -2666,9 +2671,11 @@ static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc) (mrioc->num_admin_req); writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries); mpi3mr_writeq(mrioc->admin_req_dma, - &mrioc->sysif_regs->admin_request_queue_address); + &mrioc->sysif_regs->admin_request_queue_address, + &mrioc->adm_req_q_bar_writeq_lock); mpi3mr_writeq(mrioc->admin_reply_dma, - &mrioc->sysif_regs->admin_reply_queue_address); + &mrioc->sysif_regs->admin_reply_queue_address, + &mrioc->adm_reply_q_bar_writeq_lock); writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi); writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci); return retval; diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c index 7880675a68db..1bf3572c7aac 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_os.c +++ b/drivers/scsi/mpi3mr/mpi3mr_os.c @@ -49,6 +49,13 @@ static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
#define MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH (0xFFFE)
+/* + * SAS Log info code for a NCQ collateral abort after an NCQ error: + * IOC_LOGINFO_PREFIX_PL | PL_LOGINFO_CODE_SATA_NCQ_FAIL_ALL_CMDS_AFTR_ERR + * See: drivers/message/fusion/lsi/mpi_log_sas.h + */ +#define IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR 0x31080000 + /** * mpi3mr_host_tag_for_scmd - Get host tag for a scmd * @mrioc: Adapter instance reference @@ -3270,7 +3277,18 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc, scmd->result = DID_NO_CONNECT << 16; break; case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED: - scmd->result = DID_SOFT_ERROR << 16; + if (ioc_loginfo == IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR) { + /* + * This is a ATA NCQ command aborted due to another NCQ + * command failure. We must retry this command + * immediately but without incrementing its retry + * counter. + */ + WARN_ON_ONCE(xfer_count != 0); + scmd->result = DID_IMM_RETRY << 16; + } else { + scmd->result = DID_SOFT_ERROR << 16; + } break; case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED: case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED: @@ -5084,6 +5102,8 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id) spin_lock_init(&mrioc->tgtdev_lock); spin_lock_init(&mrioc->watchdog_lock); spin_lock_init(&mrioc->chain_buf_lock); + spin_lock_init(&mrioc->adm_req_q_bar_writeq_lock); + spin_lock_init(&mrioc->adm_reply_q_bar_writeq_lock); spin_lock_init(&mrioc->sas_node_lock);
INIT_LIST_HEAD(&mrioc->fwevt_list); diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 0afa485fb300..7bef42a2fb57 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -196,6 +196,14 @@ struct sense_info { #define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD) #define MPT3SAS_ABRT_TASK_SET (0xFFFE) #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF) + +/* + * SAS Log info code for a NCQ collateral abort after an NCQ error: + * IOC_LOGINFO_PREFIX_PL | PL_LOGINFO_CODE_SATA_NCQ_FAIL_ALL_CMDS_AFTR_ERR + * See: drivers/message/fusion/lsi/mpi_log_sas.h + */ +#define IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR 0x31080000 + /** * struct fw_event_work - firmware event struct * @list: link list framework @@ -5824,6 +5832,17 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) scmd->result = DID_TRANSPORT_DISRUPTED << 16; goto out; } + if (log_info == IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR) { + /* + * This is a ATA NCQ command aborted due to another NCQ + * command failure. We must retry this command + * immediately but without incrementing its retry + * counter. + */ + WARN_ON_ONCE(xfer_cnt != 0); + scmd->result = DID_IMM_RETRY << 16; + break; + } if (log_info == 0x31110630) { if (scmd->retries > 2) { scmd->result = DID_NO_CONNECT << 16; diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 77c28d2ebf01..d91efd7c983c 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -6606,6 +6606,8 @@ static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0); vfree(dst_addr); + if (IS_ERR(ep)) + return NULL; return ep; }
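The qla4xxx hunk above keeps an ERR_PTR()-encoded return from qla4xxx_ep_connect() from escaping to callers that only check for NULL. As a generic reminder of the convention (sketch only, the names are hypothetical):

	struct thing *t = thing_lookup(id);	/* may return ERR_PTR(-errno) */

	if (IS_ERR(t))
		return PTR_ERR(t);	/* error pointers are non-NULL ... */
	if (!t)
		return -ENOENT;		/* ... so NULL needs its own check */

Mixing the two conventions, as the unpatched code did, lets an error pointer be dereferenced as if it were a valid endpoint.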
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index cead0fbbe5db..8ee74dddef16 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -1851,7 +1851,7 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
return 0; } - +EXPORT_SYMBOL(scsi_scan_host_selected); static void scsi_sysfs_add_devices(struct Scsi_Host *shost) { struct scsi_device *sdev; diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 7fdd2b61fe85..7b4c4752e216 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -40,6 +40,8 @@ #include <scsi/scsi_transport_sas.h>
#include "scsi_sas_internal.h" +#include "scsi_priv.h" + struct sas_host_attrs { struct list_head rphy_list; struct mutex lock; @@ -1681,32 +1683,66 @@ int scsi_is_sas_rphy(const struct device *dev) } EXPORT_SYMBOL(scsi_is_sas_rphy);
- -/* - * SCSI scan helper - */ - -static int sas_user_scan(struct Scsi_Host *shost, uint channel, - uint id, u64 lun) +static void scan_channel_zero(struct Scsi_Host *shost, uint id, u64 lun) { struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); struct sas_rphy *rphy;
- mutex_lock(&sas_host->lock); list_for_each_entry(rphy, &sas_host->rphy_list, list) { if (rphy->identify.device_type != SAS_END_DEVICE || rphy->scsi_target_id == -1) continue;
- if ((channel == SCAN_WILD_CARD || channel == 0) && - (id == SCAN_WILD_CARD || id == rphy->scsi_target_id)) { + if (id == SCAN_WILD_CARD || id == rphy->scsi_target_id) { scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id, lun, SCSI_SCAN_MANUAL); } } - mutex_unlock(&sas_host->lock); +}
- return 0; +/* + * SCSI scan helper + */ + +static int sas_user_scan(struct Scsi_Host *shost, uint channel, + uint id, u64 lun) +{ + struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); + int res = 0; + int i; + + switch (channel) { + case 0: + mutex_lock(&sas_host->lock); + scan_channel_zero(shost, id, lun); + mutex_unlock(&sas_host->lock); + break; + + case SCAN_WILD_CARD: + mutex_lock(&sas_host->lock); + scan_channel_zero(shost, id, lun); + mutex_unlock(&sas_host->lock); + + for (i = 1; i <= shost->max_channel; i++) { + res = scsi_scan_host_selected(shost, i, id, lun, + SCSI_SCAN_MANUAL); + if (res) + goto exit_scan; + } + break; + + default: + if (channel < shost->max_channel) { + res = scsi_scan_host_selected(shost, channel, id, lun, + SCSI_SCAN_MANUAL); + } else { + res = -EINVAL; + } + break; + } + +exit_scan: + return res; }
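For context on the scsi_transport_sas rework above: a manual scan written to /sys/class/scsi_host/hostN/scan ("channel id lun") reaches the transport through its user_scan hook, and non-zero channels now go through scsi_scan_host_selected(), which is why that symbol is exported in the scsi_scan.c hunk. Roughly how the midlayer hands such a request to the transport (paraphrased from memory, simplified, not verbatim scsi_sysfs.c):

	static int example_store_scan(struct Scsi_Host *shost, uint channel,
				      uint id, u64 lun)
	{
		if (shost->transportt->user_scan)
			return shost->transportt->user_scan(shost, channel,
							    id, lun);

		return scsi_scan_host_selected(shost, channel, id, lun,
					       SCSI_SCAN_MANUAL);
	}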
diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c index 6f177e46fa0f..a6773075bfe3 100644 --- a/drivers/soc/qcom/mdt_loader.c +++ b/drivers/soc/qcom/mdt_loader.c @@ -17,6 +17,37 @@ #include <linux/slab.h> #include <linux/soc/qcom/mdt_loader.h>
+static bool mdt_header_valid(const struct firmware *fw) +{ + const struct elf32_hdr *ehdr; + size_t phend; + size_t shend; + + if (fw->size < sizeof(*ehdr)) + return false; + + ehdr = (struct elf32_hdr *)fw->data; + + if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) + return false; + + if (ehdr->e_phentsize != sizeof(struct elf32_phdr)) + return false; + + phend = size_add(size_mul(sizeof(struct elf32_phdr), ehdr->e_phnum), ehdr->e_phoff); + if (phend > fw->size) + return false; + + if (ehdr->e_shentsize != sizeof(struct elf32_shdr)) + return false; + + shend = size_add(size_mul(sizeof(struct elf32_shdr), ehdr->e_shnum), ehdr->e_shoff); + if (shend > fw->size) + return false; + + return true; +} + static bool mdt_phdr_valid(const struct elf32_phdr *phdr) { if (phdr->p_type != PT_LOAD) @@ -84,8 +115,11 @@ ssize_t qcom_mdt_get_size(const struct firmware *fw) phys_addr_t max_addr = 0; int i;
+ if (!mdt_header_valid(fw)) + return -EINVAL; + ehdr = (struct elf32_hdr *)fw->data; - phdrs = (struct elf32_phdr *)(ehdr + 1); + phdrs = (struct elf32_phdr *)(fw->data + ehdr->e_phoff);
for (i = 0; i < ehdr->e_phnum; i++) { phdr = &phdrs[i]; @@ -136,8 +170,11 @@ void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len, ssize_t ret; void *data;
+ if (!mdt_header_valid(fw)) + return ERR_PTR(-EINVAL); + ehdr = (struct elf32_hdr *)fw->data; - phdrs = (struct elf32_phdr *)(ehdr + 1); + phdrs = (struct elf32_phdr *)(fw->data + ehdr->e_phoff);
if (ehdr->e_phnum < 2) return ERR_PTR(-EINVAL); @@ -216,8 +253,11 @@ int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw, int ret; int i;
+ if (!mdt_header_valid(fw)) + return -EINVAL; + ehdr = (struct elf32_hdr *)fw->data; - phdrs = (struct elf32_phdr *)(ehdr + 1); + phdrs = (struct elf32_phdr *)(fw->data + ehdr->e_phoff);
for (i = 0; i < ehdr->e_phnum; i++) { phdr = &phdrs[i]; @@ -272,7 +312,7 @@ static bool qcom_mdt_bins_are_split(const struct firmware *fw, const char *fw_na int i;
ehdr = (struct elf32_hdr *)fw->data; - phdrs = (struct elf32_phdr *)(ehdr + 1); + phdrs = (struct elf32_phdr *)(fw->data + ehdr->e_phoff);
for (i = 0; i < ehdr->e_phnum; i++) { /* @@ -312,9 +352,12 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, if (!fw || !mem_region || !mem_phys || !mem_size) return -EINVAL;
+ if (!mdt_header_valid(fw)) + return -EINVAL; + is_split = qcom_mdt_bins_are_split(fw, fw_name); ehdr = (struct elf32_hdr *)fw->data; - phdrs = (struct elf32_phdr *)(ehdr + 1); + phdrs = (struct elf32_phdr *)(fw->data + ehdr->e_phoff);
for (i = 0; i < ehdr->e_phnum; i++) { phdr = &phdrs[i]; diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c index dfc2d4e38fa9..163a58eb02e0 100644 --- a/drivers/soc/qcom/rpmh-rsc.c +++ b/drivers/soc/qcom/rpmh-rsc.c @@ -1075,7 +1075,7 @@ static int rpmh_rsc_probe(struct platform_device *pdev) drv->ver.minor = rsc_id & (MINOR_VER_MASK << MINOR_VER_SHIFT); drv->ver.minor >>= MINOR_VER_SHIFT;
- if (drv->ver.major == 3) + if (drv->ver.major >= 3) drv->regs = rpmh_rsc_reg_offset_ver_3_0; else drv->regs = rpmh_rsc_reg_offset_ver_2_7; diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index 162f52456f65..1306e3b8b5c0 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -1232,7 +1232,7 @@ static int tegra_powergate_of_get_clks(struct tegra_powergate *pg, }
static int tegra_powergate_of_get_resets(struct tegra_powergate *pg, - struct device_node *np, bool off) + struct device_node *np) { struct device *dev = pg->pmc->dev; int err; @@ -1247,22 +1247,6 @@ static int tegra_powergate_of_get_resets(struct tegra_powergate *pg, err = reset_control_acquire(pg->reset); if (err < 0) { pr_err("failed to acquire resets: %d\n", err); - goto out; - } - - if (off) { - err = reset_control_assert(pg->reset); - } else { - err = reset_control_deassert(pg->reset); - if (err < 0) - goto out; - - reset_control_release(pg->reset); - } - -out: - if (err) { - reset_control_release(pg->reset); reset_control_put(pg->reset); }
@@ -1307,20 +1291,43 @@ static int tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np) goto set_available; }
- err = tegra_powergate_of_get_resets(pg, np, off); + err = tegra_powergate_of_get_resets(pg, np); if (err < 0) { dev_err(dev, "failed to get resets for %pOFn: %d\n", np, err); goto remove_clks; }
- if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) { - if (off) - WARN_ON(tegra_powergate_power_up(pg, true)); + /* + * If the power-domain is off, then ensure the resets are asserted. + * If the power-domain is on, then power down to ensure that when it + * is turned on, the power-domain, clocks and resets are all in the + * expected state. + */ + if (off) { + err = reset_control_assert(pg->reset); + if (err) { + pr_err("failed to assert resets: %d\n", err); + goto remove_resets; + } + } else { + err = tegra_powergate_power_down(pg); + if (err) { + dev_err(dev, "failed to turn off PM domain %s: %d\n", + pg->genpd.name, err); + goto remove_resets; + } + }
+ /* + * If PM_GENERIC_DOMAINS is not enabled, power-on + * the domain and skip the genpd registration. + */ + if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) { + WARN_ON(tegra_powergate_power_up(pg, true)); goto remove_resets; }
- err = pm_genpd_init(&pg->genpd, NULL, off); + err = pm_genpd_init(&pg->genpd, NULL, true); if (err < 0) { dev_err(dev, "failed to initialise PM domain %pOFn: %d\n", np, err); diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c index b89f8067e6cd..3d8937245c18 100644 --- a/drivers/soundwire/amd_manager.c +++ b/drivers/soundwire/amd_manager.c @@ -1104,10 +1104,10 @@ static int __maybe_unused amd_pm_prepare(struct device *dev) * device is not in runtime suspend state, observed that device alerts are missing * without pm_prepare on AMD platforms in clockstop mode0. */ - if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) { - ret = pm_request_resume(dev); + if (amd_manager->power_mode_mask) { + ret = pm_runtime_resume(dev); if (ret < 0) { - dev_err(bus->dev, "pm_request_resume failed: %d\n", ret); + dev_err(bus->dev, "pm_runtime_resume failed: %d\n", ret); return 0; } } diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c index 767942f19adb..e7397fd8e9ad 100644 --- a/drivers/soundwire/bus.c +++ b/drivers/soundwire/bus.c @@ -1730,15 +1730,15 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
/* Update the Slave driver */ if (slave_notify) { + if (slave->prop.use_domain_irq && slave->irq) + handle_nested_irq(slave->irq); + mutex_lock(&slave->sdw_dev_lock);
if (slave->probed) { struct device *dev = &slave->dev; struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
- if (slave->prop.use_domain_irq && slave->irq) - handle_nested_irq(slave->irq); - if (drv->ops && drv->ops->interrupt_callback) { slave_intr.sdca_cascade = sdca_cascade; slave_intr.control_port = clear; diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index 9e2541dee56e..fa899ab2014c 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ b/drivers/spi/spi-fsl-lpspi.c @@ -330,13 +330,11 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi) }
if (config.speed_hz > perclk_rate / 2) { - dev_err(fsl_lpspi->dev, - "per-clk should be at least two times of transfer speed"); - return -EINVAL; + div = 2; + } else { + div = DIV_ROUND_UP(perclk_rate, config.speed_hz); }
- div = DIV_ROUND_UP(perclk_rate, config.speed_hz); - for (prescale = 0; prescale <= prescale_max; prescale++) { scldiv = div / (1 << prescale) - 2; if (scldiv >= 0 && scldiv < 256) { diff --git a/drivers/staging/media/imx/imx-media-csc-scaler.c b/drivers/staging/media/imx/imx-media-csc-scaler.c index 95cca281e8a3..07104e7f5a5f 100644 --- a/drivers/staging/media/imx/imx-media-csc-scaler.c +++ b/drivers/staging/media/imx/imx-media-csc-scaler.c @@ -914,7 +914,7 @@ imx_media_csc_scaler_device_init(struct imx_media_dev *md) return &priv->vdev;
err_m2m: - video_set_drvdata(vfd, NULL); + video_device_release(vfd); err_vfd: kfree(priv); return ERR_PTR(ret); diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c index 6600ae44f29d..d3ab251ba049 100644 --- a/drivers/target/target_core_fabric_lib.c +++ b/drivers/target/target_core_fabric_lib.c @@ -257,11 +257,41 @@ static int iscsi_get_pr_transport_id_len( return len; }
-static char *iscsi_parse_pr_out_transport_id( +static void sas_parse_pr_out_transport_id(char *buf, char *i_str) +{ + char hex[17] = {}; + + bin2hex(hex, buf + 4, 8); + snprintf(i_str, TRANSPORT_IQN_LEN, "naa.%s", hex); +} + +static void srp_parse_pr_out_transport_id(char *buf, char *i_str) +{ + char hex[33] = {}; + + bin2hex(hex, buf + 8, 16); + snprintf(i_str, TRANSPORT_IQN_LEN, "0x%s", hex); +} + +static void fcp_parse_pr_out_transport_id(char *buf, char *i_str) +{ + snprintf(i_str, TRANSPORT_IQN_LEN, "%8phC", buf + 8); +} + +static void sbp_parse_pr_out_transport_id(char *buf, char *i_str) +{ + char hex[17] = {}; + + bin2hex(hex, buf + 8, 8); + snprintf(i_str, TRANSPORT_IQN_LEN, "%s", hex); +} + +static bool iscsi_parse_pr_out_transport_id( struct se_portal_group *se_tpg, char *buf, u32 *out_tid_len, - char **port_nexus_ptr) + char **port_nexus_ptr, + char *i_str) { char *p; int i; @@ -282,7 +312,7 @@ static char *iscsi_parse_pr_out_transport_id( if ((format_code != 0x00) && (format_code != 0x40)) { pr_err("Illegal format code: 0x%02x for iSCSI" " Initiator Transport ID\n", format_code); - return NULL; + return false; } /* * If the caller wants the TransportID Length, we set that value for the @@ -306,7 +336,7 @@ static char *iscsi_parse_pr_out_transport_id( pr_err("Unable to locate \",i,0x\" separator" " for Initiator port identifier: %s\n", &buf[4]); - return NULL; + return false; } *p = '\0'; /* Terminate iSCSI Name */ p += 5; /* Skip over ",i,0x" separator */ @@ -339,7 +369,8 @@ static char *iscsi_parse_pr_out_transport_id( } else *port_nexus_ptr = NULL;
- return &buf[4]; + strscpy(i_str, &buf[4], TRANSPORT_IQN_LEN); + return true; }
int target_get_pr_transport_id_len(struct se_node_acl *nacl, @@ -387,33 +418,35 @@ int target_get_pr_transport_id(struct se_node_acl *nacl, } }
-const char *target_parse_pr_out_transport_id(struct se_portal_group *tpg, - char *buf, u32 *out_tid_len, char **port_nexus_ptr) +bool target_parse_pr_out_transport_id(struct se_portal_group *tpg, + char *buf, u32 *out_tid_len, char **port_nexus_ptr, char *i_str) { - u32 offset; - switch (tpg->proto_id) { case SCSI_PROTOCOL_SAS: /* * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID * for initiator ports using SCSI over SAS Serial SCSI Protocol. */ - offset = 4; + sas_parse_pr_out_transport_id(buf, i_str); break; - case SCSI_PROTOCOL_SBP: case SCSI_PROTOCOL_SRP: + srp_parse_pr_out_transport_id(buf, i_str); + break; case SCSI_PROTOCOL_FCP: - offset = 8; + fcp_parse_pr_out_transport_id(buf, i_str); + break; + case SCSI_PROTOCOL_SBP: + sbp_parse_pr_out_transport_id(buf, i_str); break; case SCSI_PROTOCOL_ISCSI: return iscsi_parse_pr_out_transport_id(tpg, buf, out_tid_len, - port_nexus_ptr); + port_nexus_ptr, i_str); default: pr_err("Unknown proto_id: 0x%02x\n", tpg->proto_id); - return NULL; + return false; }
*port_nexus_ptr = NULL; *out_tid_len = 24; - return buf + offset; + return true; } diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 408be26d2e9b..20aab1f50565 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -103,8 +103,8 @@ int target_get_pr_transport_id_len(struct se_node_acl *nacl, int target_get_pr_transport_id(struct se_node_acl *nacl, struct t10_pr_registration *pr_reg, int *format_code, unsigned char *buf); -const char *target_parse_pr_out_transport_id(struct se_portal_group *tpg, - char *buf, u32 *out_tid_len, char **port_nexus_ptr); +bool target_parse_pr_out_transport_id(struct se_portal_group *tpg, + char *buf, u32 *out_tid_len, char **port_nexus_ptr, char *i_str);
/* target_core_hba.c */ struct se_hba *core_alloc_hba(const char *, u32, u32); diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index a9eb6a3e8383..624d2f68bf38 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -1477,11 +1477,12 @@ core_scsi3_decode_spec_i_port( LIST_HEAD(tid_dest_list); struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; unsigned char *buf, *ptr, proto_ident; - const unsigned char *i_str = NULL; + unsigned char i_str[TRANSPORT_IQN_LEN]; char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN]; sense_reason_t ret; u32 tpdl, tid_len = 0; u32 dest_rtpi = 0; + bool tid_found;
/* * Allocate a struct pr_transport_id_holder and setup the @@ -1570,9 +1571,9 @@ core_scsi3_decode_spec_i_port( dest_rtpi = tmp_lun->lun_tpg->tpg_rtpi;
iport_ptr = NULL; - i_str = target_parse_pr_out_transport_id(tmp_tpg, - ptr, &tid_len, &iport_ptr); - if (!i_str) + tid_found = target_parse_pr_out_transport_id(tmp_tpg, + ptr, &tid_len, &iport_ptr, i_str); + if (!tid_found) continue; /* * Determine if this SCSI device server requires that @@ -3152,13 +3153,14 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg; struct t10_reservation *pr_tmpl = &dev->t10_pr; unsigned char *buf; - const unsigned char *initiator_str; + unsigned char initiator_str[TRANSPORT_IQN_LEN]; char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN] = { }; u32 tid_len, tmp_tid_len; int new_reg = 0, type, scope, matching_iname; sense_reason_t ret; unsigned short rtpi; unsigned char proto_ident; + bool tid_found;
if (!se_sess || !se_lun) { pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); @@ -3277,9 +3279,9 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, ret = TCM_INVALID_PARAMETER_LIST; goto out; } - initiator_str = target_parse_pr_out_transport_id(dest_se_tpg, - &buf[24], &tmp_tid_len, &iport_ptr); - if (!initiator_str) { + tid_found = target_parse_pr_out_transport_id(dest_se_tpg, + &buf[24], &tmp_tid_len, &iport_ptr, initiator_str); + if (!tid_found) { pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate" " initiator_str from Transport ID\n"); ret = TCM_INVALID_PARAMETER_LIST; diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c index 78c5cfe6a0c0..eeccf905f83e 100644 --- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c +++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2011-2015, 2017, 2020, The Linux Foundation. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */
#include <linux/bitops.h> @@ -16,6 +17,7 @@
#include "../thermal_hwmon.h"
+#define QPNP_TM_REG_DIG_MINOR 0x00 #define QPNP_TM_REG_DIG_MAJOR 0x01 #define QPNP_TM_REG_TYPE 0x04 #define QPNP_TM_REG_SUBTYPE 0x05 @@ -31,7 +33,7 @@ #define STATUS_GEN2_STATE_MASK GENMASK(6, 4) #define STATUS_GEN2_STATE_SHIFT 4
-#define SHUTDOWN_CTRL1_OVERRIDE_S2 BIT(6) +#define SHUTDOWN_CTRL1_OVERRIDE_STAGE2 BIT(6) #define SHUTDOWN_CTRL1_THRESHOLD_MASK GENMASK(1, 0)
#define SHUTDOWN_CTRL1_RATE_25HZ BIT(3) @@ -79,6 +81,7 @@ struct qpnp_tm_chip { /* protects .thresh, .stage and chip registers */ struct mutex lock; bool initialized; + bool require_stage2_shutdown;
struct iio_channel *adc; const long (*temp_map)[THRESH_COUNT][STAGE_COUNT]; @@ -221,13 +224,13 @@ static int qpnp_tm_update_critical_trip_temp(struct qpnp_tm_chip *chip, { long stage2_threshold_min = (*chip->temp_map)[THRESH_MIN][1]; long stage2_threshold_max = (*chip->temp_map)[THRESH_MAX][1]; - bool disable_s2_shutdown = false; + bool disable_stage2_shutdown = false; u8 reg;
WARN_ON(!mutex_is_locked(&chip->lock));
/* - * Default: S2 and S3 shutdown enabled, thresholds at + * Default: Stage 2 and Stage 3 shutdown enabled, thresholds at * lowest threshold set, monitoring at 25Hz */ reg = SHUTDOWN_CTRL1_RATE_25HZ; @@ -242,12 +245,12 @@ static int qpnp_tm_update_critical_trip_temp(struct qpnp_tm_chip *chip, chip->thresh = THRESH_MAX - ((stage2_threshold_max - temp) / TEMP_THRESH_STEP); - disable_s2_shutdown = true; + disable_stage2_shutdown = true; } else { chip->thresh = THRESH_MAX;
if (chip->adc) - disable_s2_shutdown = true; + disable_stage2_shutdown = true; else dev_warn(chip->dev, "No ADC is configured and critical temperature %d mC is above the maximum stage 2 threshold of %ld mC! Configuring stage 2 shutdown at %ld mC.\n", @@ -256,8 +259,8 @@ static int qpnp_tm_update_critical_trip_temp(struct qpnp_tm_chip *chip,
skip: reg |= chip->thresh; - if (disable_s2_shutdown) - reg |= SHUTDOWN_CTRL1_OVERRIDE_S2; + if (disable_stage2_shutdown && !chip->require_stage2_shutdown) + reg |= SHUTDOWN_CTRL1_OVERRIDE_STAGE2;
return qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, reg); } @@ -371,8 +374,8 @@ static int qpnp_tm_probe(struct platform_device *pdev) { struct qpnp_tm_chip *chip; struct device_node *node; - u8 type, subtype, dig_major; - u32 res; + u8 type, subtype, dig_major, dig_minor; + u32 res, dig_revision; int ret, irq;
node = pdev->dev.of_node; @@ -424,6 +427,11 @@ static int qpnp_tm_probe(struct platform_device *pdev) return dev_err_probe(&pdev->dev, ret, "could not read dig_major\n");
+ ret = qpnp_tm_read(chip, QPNP_TM_REG_DIG_MINOR, &dig_minor); + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, + "could not read dig_minor\n"); + if (type != QPNP_TM_TYPE || (subtype != QPNP_TM_SUBTYPE_GEN1 && subtype != QPNP_TM_SUBTYPE_GEN2)) { dev_err(&pdev->dev, "invalid type 0x%02x or subtype 0x%02x\n", @@ -437,6 +445,23 @@ static int qpnp_tm_probe(struct platform_device *pdev) else chip->temp_map = &temp_map_gen1;
+ if (chip->subtype == QPNP_TM_SUBTYPE_GEN2) { + dig_revision = (dig_major << 8) | dig_minor; + /* + * Check if stage 2 automatic partial shutdown must remain + * enabled to avoid potential repeated faults upon reaching + * over-temperature stage 3. + */ + switch (dig_revision) { + case 0x0001: + case 0x0002: + case 0x0100: + case 0x0101: + chip->require_stage2_shutdown = true; + break; + } + } + /* * Register the sensor before initializing the hardware to be able to * read the trip points. get_temp() returns the default temperature diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c index eef40d4f3063..0dea605faadb 100644 --- a/drivers/thermal/thermal_sysfs.c +++ b/drivers/thermal/thermal_sysfs.c @@ -39,10 +39,13 @@ temp_show(struct device *dev, struct device_attribute *attr, char *buf)
ret = thermal_zone_get_temp(tz, &temperature);
- if (ret) - return ret; + if (!ret) + return sprintf(buf, "%d\n", temperature);
- return sprintf(buf, "%d\n", temperature); + if (ret == -EAGAIN) + return -ENODATA; + + return ret; }
static ssize_t diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c index 31f3da4e6a08..73be797acd50 100644 --- a/drivers/thunderbolt/domain.c +++ b/drivers/thunderbolt/domain.c @@ -36,7 +36,7 @@ static bool match_service_id(const struct tb_service_id *id, return false; }
- if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) { + if (id->match_flags & TBSVC_MATCH_PROTOCOL_REVISION) { if (id->protocol_revision != svc->prtcrevs) return false; } diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index d5ad6cae6b65..23aed9e89e30 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -2375,9 +2375,8 @@ int serial8250_do_startup(struct uart_port *port) /* * Now, initialize the UART */ - serial_port_out(port, UART_LCR, UART_LCR_WLEN8); - spin_lock_irqsave(&port->lock, flags); + serial_port_out(port, UART_LCR, UART_LCR_WLEN8); if (up->port.flags & UPF_FOURPORT) { if (!up->port.irq) up->port.mctrl |= TIOCM_OUT1; diff --git a/drivers/tty/vt/defkeymap.c_shipped b/drivers/tty/vt/defkeymap.c_shipped index 0c043e4f292e..6af7bf8d5460 100644 --- a/drivers/tty/vt/defkeymap.c_shipped +++ b/drivers/tty/vt/defkeymap.c_shipped @@ -23,6 +23,22 @@ unsigned short plain_map[NR_KEYS] = { 0xf118, 0xf601, 0xf602, 0xf117, 0xf600, 0xf119, 0xf115, 0xf116, 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, };
static unsigned short shift_map[NR_KEYS] = { @@ -42,6 +58,22 @@ static unsigned short shift_map[NR_KEYS] = { 0xf20b, 0xf601, 0xf602, 0xf117, 0xf600, 0xf20a, 0xf115, 0xf116, 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, };
static unsigned short altgr_map[NR_KEYS] = { @@ -61,6 +93,22 @@ static unsigned short altgr_map[NR_KEYS] = { 0xf118, 0xf601, 0xf602, 0xf117, 0xf600, 0xf119, 0xf115, 0xf116, 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, };
static unsigned short ctrl_map[NR_KEYS] = { @@ -80,6 +128,22 @@ static unsigned short ctrl_map[NR_KEYS] = { 0xf118, 0xf601, 0xf602, 0xf117, 0xf600, 0xf119, 0xf115, 0xf116, 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, };
static unsigned short shift_ctrl_map[NR_KEYS] = { @@ -99,6 +163,22 @@ static unsigned short shift_ctrl_map[NR_KEYS] = { 0xf118, 0xf601, 0xf602, 0xf117, 0xf600, 0xf119, 0xf115, 0xf116, 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, };
static unsigned short alt_map[NR_KEYS] = { @@ -118,6 +198,22 @@ static unsigned short alt_map[NR_KEYS] = { 0xf118, 0xf210, 0xf211, 0xf117, 0xf600, 0xf119, 0xf115, 0xf116, 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, };
static unsigned short ctrl_alt_map[NR_KEYS] = { @@ -137,6 +233,22 @@ static unsigned short ctrl_alt_map[NR_KEYS] = { 0xf118, 0xf601, 0xf602, 0xf117, 0xf600, 0xf119, 0xf115, 0xf20c, 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, };
unsigned short *key_maps[MAX_NR_KEYMAPS] = { diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c index 358f216c6cd6..18b3c197c134 100644 --- a/drivers/tty/vt/keyboard.c +++ b/drivers/tty/vt/keyboard.c @@ -1496,7 +1496,7 @@ static void kbd_keycode(unsigned int keycode, int down, bool hw_raw) rc = atomic_notifier_call_chain(&keyboard_notifier_list, KBD_UNICODE, &param); if (rc != NOTIFY_STOP) - if (down && !raw_mode) + if (down && !(raw_mode || kbd->kbdmode == VC_OFF)) k_unicode(vc, keysym, !down); return; } diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h index dffc932285ac..e33466609052 100644 --- a/drivers/ufs/core/ufshcd-priv.h +++ b/drivers/ufs/core/ufshcd-priv.h @@ -326,7 +326,7 @@ static inline int ufshcd_rpm_get_sync(struct ufs_hba *hba)
static inline int ufshcd_rpm_get_if_active(struct ufs_hba *hba) { - return pm_runtime_get_if_active(&hba->ufs_device_wlun->sdev_gendev, true); + return pm_runtime_get_if_active(&hba->ufs_device_wlun->sdev_gendev); }
static inline int ufshcd_rpm_put_sync(struct ufs_hba *hba) diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c index f61126189876..14c1b855e10a 100644 --- a/drivers/ufs/host/ufs-exynos.c +++ b/drivers/ufs/host/ufs-exynos.c @@ -1028,8 +1028,8 @@ static int exynos_ufs_post_link(struct ufs_hba *hba) hci_writel(ufs, 0xa, HCI_DATA_REORDER); hci_writel(ufs, PRDT_SET_SIZE(12), HCI_TXPRDT_ENTRY_SIZE); hci_writel(ufs, PRDT_SET_SIZE(12), HCI_RXPRDT_ENTRY_SIZE); - hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE); - hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE); + hci_writel(ufs, BIT(hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE); + hci_writel(ufs, BIT(hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE); hci_writel(ufs, 0xf, HCI_AXIDMA_RWDATA_BURST_LEN);
if (ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB) diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c index 248a49e5e7f3..c38ea3395b2c 100644 --- a/drivers/ufs/host/ufshcd-pci.c +++ b/drivers/ufs/host/ufshcd-pci.c @@ -213,6 +213,32 @@ static int ufs_intel_lkf_apply_dev_quirks(struct ufs_hba *hba) return ret; }
+static void ufs_intel_ctrl_uic_compl(struct ufs_hba *hba, bool enable) +{ + u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); + + if (enable) + set |= UIC_COMMAND_COMPL; + else + set &= ~UIC_COMMAND_COMPL; + ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); +} + +static void ufs_intel_mtl_h8_notify(struct ufs_hba *hba, + enum uic_cmd_dme cmd, + enum ufs_notify_change_status status) +{ + /* + * Disable UIC COMPL INTR to prevent access to UFSHCI after + * checking HCS.UPMCRS + */ + if (status == PRE_CHANGE && cmd == UIC_CMD_DME_HIBER_ENTER) + ufs_intel_ctrl_uic_compl(hba, false); + + if (status == POST_CHANGE && cmd == UIC_CMD_DME_HIBER_EXIT) + ufs_intel_ctrl_uic_compl(hba, true); +} + #define INTEL_ACTIVELTR 0x804 #define INTEL_IDLELTR 0x808
@@ -439,10 +465,23 @@ static int ufs_intel_adl_init(struct ufs_hba *hba) return ufs_intel_common_init(hba); }
+static void ufs_intel_mtl_late_init(struct ufs_hba *hba) +{ + hba->rpm_lvl = UFS_PM_LVL_2; + hba->spm_lvl = UFS_PM_LVL_2; +} + static int ufs_intel_mtl_init(struct ufs_hba *hba) { + struct ufs_host *ufs_host; + int err; + hba->caps |= UFSHCD_CAP_CRYPTO | UFSHCD_CAP_WB_EN; - return ufs_intel_common_init(hba); + err = ufs_intel_common_init(hba); + /* Get variant after it is set in ufs_intel_common_init() */ + ufs_host = ufshcd_get_variant(hba); + ufs_host->late_init = ufs_intel_mtl_late_init; + return err; }
static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = { @@ -487,6 +526,7 @@ static struct ufs_hba_variant_ops ufs_intel_mtl_hba_vops = { .init = ufs_intel_mtl_init, .exit = ufs_intel_common_exit, .hce_enable_notify = ufs_intel_hce_enable_notify, + .hibern8_notify = ufs_intel_mtl_h8_notify, .link_startup_notify = ufs_intel_link_startup_notify, .resume = ufs_intel_resume, .device_reset = ufs_intel_device_reset, diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c index 1443e9cf631a..db9a8f2731f1 100644 --- a/drivers/usb/atm/cxacru.c +++ b/drivers/usb/atm/cxacru.c @@ -980,25 +980,60 @@ static int cxacru_fw(struct usb_device *usb_dev, enum cxacru_fw_request fw, return ret; }
-static void cxacru_upload_firmware(struct cxacru_data *instance, - const struct firmware *fw, - const struct firmware *bp) + +static int cxacru_find_firmware(struct cxacru_data *instance, + char *phase, const struct firmware **fw_p) { - int ret; + struct usbatm_data *usbatm = instance->usbatm; + struct device *dev = &usbatm->usb_intf->dev; + char buf[16]; + + sprintf(buf, "cxacru-%s.bin", phase); + usb_dbg(usbatm, "cxacru_find_firmware: looking for %s\n", buf); + + if (request_firmware(fw_p, buf, dev)) { + usb_dbg(usbatm, "no stage %s firmware found\n", phase); + return -ENOENT; + } + + usb_info(usbatm, "found firmware %s\n", buf); + + return 0; +} + +static int cxacru_heavy_init(struct usbatm_data *usbatm_instance, + struct usb_interface *usb_intf) +{ + const struct firmware *fw, *bp; + struct cxacru_data *instance = usbatm_instance->driver_data; struct usbatm_data *usbatm = instance->usbatm; struct usb_device *usb_dev = usbatm->usb_dev; __le16 signature[] = { usb_dev->descriptor.idVendor, usb_dev->descriptor.idProduct }; __le32 val; + int ret;
- usb_dbg(usbatm, "%s\n", __func__); + ret = cxacru_find_firmware(instance, "fw", &fw); + if (ret) { + usb_warn(usbatm_instance, "firmware (cxacru-fw.bin) unavailable (system misconfigured?)\n"); + return ret; + } + + if (instance->modem_type->boot_rom_patch) { + ret = cxacru_find_firmware(instance, "bp", &bp); + if (ret) { + usb_warn(usbatm_instance, "boot ROM patch (cxacru-bp.bin) unavailable (system misconfigured?)\n"); + release_firmware(fw); + return ret; + } + }
/* FirmwarePllFClkValue */ val = cpu_to_le32(instance->modem_type->pll_f_clk); ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, PLLFCLK_ADDR, (u8 *) &val, 4); if (ret) { usb_err(usbatm, "FirmwarePllFClkValue failed: %d\n", ret); - return; + goto done; }
/* FirmwarePllBClkValue */ @@ -1006,7 +1041,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance, ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, PLLBCLK_ADDR, (u8 *) &val, 4); if (ret) { usb_err(usbatm, "FirmwarePllBClkValue failed: %d\n", ret); - return; + goto done; }
/* Enable SDRAM */ @@ -1014,7 +1049,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance, ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, SDRAMEN_ADDR, (u8 *) &val, 4); if (ret) { usb_err(usbatm, "Enable SDRAM failed: %d\n", ret); - return; + goto done; }
/* Firmware */ @@ -1022,7 +1057,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance, ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, FW_ADDR, fw->data, fw->size); if (ret) { usb_err(usbatm, "Firmware upload failed: %d\n", ret); - return; + goto done; }
/* Boot ROM patch */ @@ -1031,7 +1066,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance, ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, BR_ADDR, bp->data, bp->size); if (ret) { usb_err(usbatm, "Boot ROM patching failed: %d\n", ret); - return; + goto done; } }
@@ -1039,7 +1074,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance, ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, SIG_ADDR, (u8 *) signature, 4); if (ret) { usb_err(usbatm, "Signature storing failed: %d\n", ret); - return; + goto done; }
usb_info(usbatm, "starting device\n"); @@ -1051,7 +1086,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance, } if (ret) { usb_err(usbatm, "Passing control to firmware failed: %d\n", ret); - return; + goto done; }
/* Delay to allow firmware to start up. */ @@ -1065,53 +1100,10 @@ static void cxacru_upload_firmware(struct cxacru_data *instance, ret = cxacru_cm(instance, CM_REQUEST_CARD_GET_STATUS, NULL, 0, NULL, 0); if (ret < 0) { usb_err(usbatm, "modem failed to initialize: %d\n", ret); - return; - } -} - -static int cxacru_find_firmware(struct cxacru_data *instance, - char *phase, const struct firmware **fw_p) -{ - struct usbatm_data *usbatm = instance->usbatm; - struct device *dev = &usbatm->usb_intf->dev; - char buf[16]; - - sprintf(buf, "cxacru-%s.bin", phase); - usb_dbg(usbatm, "cxacru_find_firmware: looking for %s\n", buf); - - if (request_firmware(fw_p, buf, dev)) { - usb_dbg(usbatm, "no stage %s firmware found\n", phase); - return -ENOENT; - } - - usb_info(usbatm, "found firmware %s\n", buf); - - return 0; -} - -static int cxacru_heavy_init(struct usbatm_data *usbatm_instance, - struct usb_interface *usb_intf) -{ - const struct firmware *fw, *bp; - struct cxacru_data *instance = usbatm_instance->driver_data; - int ret = cxacru_find_firmware(instance, "fw", &fw); - - if (ret) { - usb_warn(usbatm_instance, "firmware (cxacru-fw.bin) unavailable (system misconfigured?)\n"); - return ret; + goto done; }
- if (instance->modem_type->boot_rom_patch) { - ret = cxacru_find_firmware(instance, "bp", &bp); - if (ret) { - usb_warn(usbatm_instance, "boot ROM patch (cxacru-bp.bin) unavailable (system misconfigured?)\n"); - release_firmware(fw); - return ret; - } - } - - cxacru_upload_firmware(instance, fw, bp); - +done: if (instance->modem_type->boot_rom_patch) release_firmware(bp); release_firmware(fw); diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index c1d7d87b32cc..f0c87c914149 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1520,6 +1520,12 @@ static int acm_probe(struct usb_interface *intf, goto err_remove_files; }
+ if (quirks & CLEAR_HALT_CONDITIONS) { + /* errors intentionally ignored */ + usb_clear_halt(usb_dev, acm->in); + usb_clear_halt(usb_dev, acm->out); + } + tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor, &control_interface->dev); if (IS_ERR(tty_dev)) { @@ -1527,11 +1533,6 @@ static int acm_probe(struct usb_interface *intf, goto err_release_data_interface; }
- if (quirks & CLEAR_HALT_CONDITIONS) { - usb_clear_halt(usb_dev, acm->in); - usb_clear_halt(usb_dev, acm->out); - } - dev_info(&intf->dev, "ttyACM%d: USB ACM device\n", minor);
return 0; diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 847dd32c0f5e..3180419424c0 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -81,8 +81,14 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, */ desc = (struct usb_ss_ep_comp_descriptor *) buffer;
- if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP || - size < USB_DT_SS_EP_COMP_SIZE) { + if (size < USB_DT_SS_EP_COMP_SIZE) { + dev_notice(ddev, + "invalid SuperSpeed endpoint companion descriptor " + "of length %d, skipping\n", size); + return; + } + + if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP) { dev_notice(ddev, "No SuperSpeed endpoint companion for config %d " " interface %d altsetting %d ep %d: " "using minimum values\n", diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 12b6dfeaf658..0b2a3f645d2f 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -2152,7 +2152,7 @@ static struct urb *request_single_step_set_feature_urb( urb->complete = usb_ehset_completion; urb->status = -EINPROGRESS; urb->actual_length = 0; - urb->transfer_flags = URB_DIR_IN; + urb->transfer_flags = URB_DIR_IN | URB_NO_TRANSFER_DMA_MAP; usb_get_urb(urb); atomic_inc(&urb->use_count); atomic_inc(&urb->dev->urbnum); @@ -2216,9 +2216,15 @@ int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
/* Complete remaining DATA and STATUS stages using the same URB */ urb->status = -EINPROGRESS; + urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP; usb_get_urb(urb); atomic_inc(&urb->use_count); atomic_inc(&urb->dev->urbnum); + if (map_urb_for_dma(hcd, urb, GFP_KERNEL)) { + usb_put_urb(urb); + goto out1; + } + retval = hcd->driver->submit_single_step_set_feature(hcd, urb, 0); if (!retval && !wait_for_completion_timeout(&done, msecs_to_jiffies(2000))) { diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 46db600fdd82..bfd97cad8aa4 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -371,6 +371,7 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM },
/* SanDisk Corp. SanDisk 3.2Gen1 */ + { USB_DEVICE(0x0781, 0x5596), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x0781, 0x55a3), .driver_info = USB_QUIRK_DELAY_INIT },
/* SanDisk Extreme 55AE */ diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c index 7576920e2d5a..9f202f575cec 100644 --- a/drivers/usb/core/urb.c +++ b/drivers/usb/core/urb.c @@ -500,7 +500,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
/* Check that the pipe's type matches the endpoint's type */ if (usb_pipe_type_check(urb->dev, urb->pipe)) - dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n", + dev_warn_once(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n", usb_pipetype(urb->pipe), pipetypes[xfertype]);
/* Check against a simple/standard policy */ diff --git a/drivers/usb/dwc3/dwc3-imx8mp.c b/drivers/usb/dwc3/dwc3-imx8mp.c index a1e15f2fffdb..b53468c41f67 100644 --- a/drivers/usb/dwc3/dwc3-imx8mp.c +++ b/drivers/usb/dwc3/dwc3-imx8mp.c @@ -244,7 +244,7 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev) IRQF_ONESHOT, dev_name(dev), dwc3_imx); if (err) { dev_err(dev, "failed to request IRQ #%d --> %d\n", irq, err); - goto depopulate; + goto put_dwc3; }
device_set_wakeup_capable(dev, true); @@ -252,6 +252,8 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev)
return 0;
+put_dwc3: + put_device(&dwc3_imx->dwc3->dev); depopulate: of_platform_depopulate(dev); err_node_put: @@ -272,6 +274,8 @@ static void dwc3_imx8mp_remove(struct platform_device *pdev) struct dwc3_imx8mp *dwc3_imx = platform_get_drvdata(pdev); struct device *dev = &pdev->dev;
+ put_device(&dwc3_imx->dwc3->dev); + pm_runtime_get_sync(dev); of_platform_depopulate(dev);
diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c index 2c07c038b584..6ea1a876203d 100644 --- a/drivers/usb/dwc3/dwc3-meson-g12a.c +++ b/drivers/usb/dwc3/dwc3-meson-g12a.c @@ -837,6 +837,9 @@ static void dwc3_meson_g12a_remove(struct platform_device *pdev)
usb_role_switch_unregister(priv->role_switch);
+ put_device(priv->switch_desc.udc); + put_device(priv->switch_desc.usb2_port); + of_platform_depopulate(dev);
for (i = 0 ; i < PHY_COUNT ; ++i) { diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 54a4ee2b90b7..39c72cb52ce7 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -41,6 +41,7 @@ #define PCI_DEVICE_ID_INTEL_TGPLP 0xa0ee #define PCI_DEVICE_ID_INTEL_TGPH 0x43ee #define PCI_DEVICE_ID_INTEL_JSP 0x4dee +#define PCI_DEVICE_ID_INTEL_WCL 0x4d7e #define PCI_DEVICE_ID_INTEL_ADL 0x460e #define PCI_DEVICE_ID_INTEL_ADL_PCH 0x51ee #define PCI_DEVICE_ID_INTEL_ADLN 0x465e @@ -431,6 +432,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = { { PCI_DEVICE_DATA(INTEL, TGPLP, &dwc3_pci_intel_swnode) }, { PCI_DEVICE_DATA(INTEL, TGPH, &dwc3_pci_intel_swnode) }, { PCI_DEVICE_DATA(INTEL, JSP, &dwc3_pci_intel_swnode) }, + { PCI_DEVICE_DATA(INTEL, WCL, &dwc3_pci_intel_swnode) }, { PCI_DEVICE_DATA(INTEL, ADL, &dwc3_pci_intel_swnode) }, { PCI_DEVICE_DATA(INTEL, ADL_PCH, &dwc3_pci_intel_swnode) }, { PCI_DEVICE_DATA(INTEL, ADLN, &dwc3_pci_intel_swnode) }, diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 874497f86499..876a839f2d1d 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -288,7 +288,9 @@ void dwc3_ep0_out_start(struct dwc3 *dwc) dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 8, DWC3_TRBCTL_CONTROL_SETUP, false); ret = dwc3_ep0_start_trans(dep); - WARN_ON(ret < 0); + if (ret < 0) + dev_err(dwc->dev, "ep0 out start transfer failed: %d\n", ret); + for (i = 2; i < DWC3_ENDPOINTS_NUM; i++) { struct dwc3_ep *dwc3_ep;
@@ -1061,7 +1063,9 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, ret = dwc3_ep0_start_trans(dep); }
- WARN_ON(ret < 0); + if (ret < 0) + dev_err(dwc->dev, + "ep0 data phase start transfer failed: %d\n", ret); }
static int dwc3_ep0_start_control_status(struct dwc3_ep *dep) @@ -1078,7 +1082,12 @@ static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep) { - WARN_ON(dwc3_ep0_start_control_status(dep)); + int ret; + + ret = dwc3_ep0_start_control_status(dep); + if (ret) + dev_err(dwc->dev, + "ep0 status phase start transfer failed: %d\n", ret); }
static void dwc3_ep0_do_control_status(struct dwc3 *dwc, @@ -1121,7 +1130,10 @@ void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep) cmd |= DWC3_DEPCMD_PARAM(dep->resource_index); memset(&params, 0, sizeof(params)); ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params); - WARN_ON_ONCE(ret); + if (ret) + dev_err_ratelimited(dwc->dev, + "ep0 data phase end transfer failed: %d\n", ret); + dep->resource_index = 0; }
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index a17af4ab20a3..6e90f2ad0426 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1765,7 +1765,11 @@ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool int dep->flags |= DWC3_EP_DELAY_STOP; return 0; } - WARN_ON_ONCE(ret); + + if (ret) + dev_err_ratelimited(dep->dwc->dev, + "end transfer failed: %d\n", ret); + dep->resource_index = 0;
if (!interrupt) @@ -3735,6 +3739,15 @@ static void dwc3_gadget_endpoint_transfer_complete(struct dwc3_ep *dep, static void dwc3_gadget_endpoint_transfer_not_ready(struct dwc3_ep *dep, const struct dwc3_event_depevt *event) { + /* + * During a device-initiated disconnect, a late xferNotReady event can + * be generated after the End Transfer command resets the event filter, + * but before the controller is halted. Ignore it to prevent a new + * transfer from starting. + */ + if (!dep->dwc->connected) + return; + dwc3_gadget_endpoint_frame_from_event(dep, event);
/* @@ -4036,7 +4049,9 @@ static void dwc3_clear_stall_all_ep(struct dwc3 *dwc) dep->flags &= ~DWC3_EP_STALL;
ret = dwc3_send_clear_stall_ep_cmd(dep); - WARN_ON_ONCE(ret); + if (ret) + dev_err_ratelimited(dwc->dev, + "failed to clear STALL on %s\n", dep->name); } }
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index a93ad93390ba..34685c714473 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -2658,6 +2658,7 @@ static void renesas_usb3_remove(struct platform_device *pdev) struct renesas_usb3 *usb3 = platform_get_drvdata(pdev);
debugfs_remove_recursive(usb3->dentry); + put_device(usb3->host_dev); device_remove_file(&pdev->dev, &dev_attr_role);
cancel_work_sync(&usb3->role_work); diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index a2b6a922077e..d3d535ed00b5 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -735,8 +735,7 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci, if (!xhci->devs[i]) continue;
- retval = xhci_disable_slot(xhci, i); - xhci_free_virt_device(xhci, i); + retval = xhci_disable_and_free_slot(xhci, i); if (retval) xhci_err(xhci, "Failed to disable slot %d, %d. Enter test mode anyway\n", i, retval); diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index cceb69d4f61e..04718048b74b 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -849,21 +849,20 @@ int xhci_alloc_tt_info(struct xhci_hcd *xhci, * will be manipulated by the configure endpoint, allocate device, or update * hub functions while this function is removing the TT entries from the list. */ -void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) +void xhci_free_virt_device(struct xhci_hcd *xhci, struct xhci_virt_device *dev, + int slot_id) { - struct xhci_virt_device *dev; int i; int old_active_eps = 0;
/* Slot ID 0 is reserved */ - if (slot_id == 0 || !xhci->devs[slot_id]) + if (slot_id == 0 || !dev) return;
- dev = xhci->devs[slot_id]; - - xhci->dcbaa->dev_context_ptrs[slot_id] = 0; - if (!dev) - return; + /* If device ctx array still points to _this_ device, clear it */ + if (dev->out_ctx && + xhci->dcbaa->dev_context_ptrs[slot_id] == cpu_to_le64(dev->out_ctx->dma)) + xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
trace_xhci_free_virt_device(dev);
@@ -902,8 +901,9 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
if (dev->udev && dev->udev->slot_id) dev->udev->slot_id = 0; - kfree(xhci->devs[slot_id]); - xhci->devs[slot_id] = NULL; + if (xhci->devs[slot_id] == dev) + xhci->devs[slot_id] = NULL; + kfree(dev); }
/* @@ -945,7 +945,7 @@ static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_i out: /* we are now at a leaf device */ xhci_debugfs_remove_slot(xhci, slot_id); - xhci_free_virt_device(xhci, slot_id); + xhci_free_virt_device(xhci, vdev, slot_id); }
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, @@ -1182,6 +1182,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma | dev->eps[0].ring->cycle_state);
+ ep0_ctx->tx_info = cpu_to_le32(EP_AVG_TRB_LENGTH(8)); + trace_xhci_setup_addressable_virt_device(dev);
/* Steps 7 and 8 were done in xhci_alloc_virt_device() */ diff --git a/drivers/usb/host/xhci-pci-renesas.c b/drivers/usb/host/xhci-pci-renesas.c index 93f8b355bc70..4ceed19c64f0 100644 --- a/drivers/usb/host/xhci-pci-renesas.c +++ b/drivers/usb/host/xhci-pci-renesas.c @@ -47,8 +47,9 @@ #define RENESAS_ROM_ERASE_MAGIC 0x5A65726F #define RENESAS_ROM_WRITE_MAGIC 0x53524F4D
-#define RENESAS_RETRY 10000 -#define RENESAS_DELAY 10 +#define RENESAS_RETRY 50000 /* 50000 * RENESAS_DELAY ~= 500ms */ +#define RENESAS_CHIP_ERASE_RETRY 500000 /* 500000 * RENESAS_DELAY ~= 5s */ +#define RENESAS_DELAY 10
static int renesas_fw_download_image(struct pci_dev *dev, const u32 *fw, size_t step, bool rom) @@ -405,7 +406,7 @@ static void renesas_rom_erase(struct pci_dev *pdev) /* sleep a bit while ROM is erased */ msleep(20);
- for (i = 0; i < RENESAS_RETRY; i++) { + for (i = 0; i < RENESAS_CHIP_ERASE_RETRY; i++) { retval = pci_read_config_byte(pdev, RENESAS_ROM_STATUS, &status); status &= RENESAS_ROM_STATUS_ERASE; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 44352df58c9e..a21ac9d80275 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1338,12 +1338,15 @@ static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci, */ void xhci_hc_died(struct xhci_hcd *xhci) { + bool notify; int i, j;
if (xhci->xhc_state & XHCI_STATE_DYING) return;
- xhci_err(xhci, "xHCI host controller not responding, assume dead\n"); + notify = !(xhci->xhc_state & XHCI_STATE_REMOVING); + if (notify) + xhci_err(xhci, "xHCI host controller not responding, assume dead\n"); xhci->xhc_state |= XHCI_STATE_DYING;
xhci_cleanup_command_queue(xhci); @@ -1357,7 +1360,7 @@ void xhci_hc_died(struct xhci_hcd *xhci) }
/* inform usb core hc died if PCI remove isn't already handling it */ - if (!(xhci->xhc_state & XHCI_STATE_REMOVING)) + if (notify) usb_hc_died(xhci_to_hcd(xhci)); }
@@ -1584,7 +1587,8 @@ static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id, command->slot_id = 0; }
-static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id) +static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id, + u32 cmd_comp_code) { struct xhci_virt_device *virt_dev; struct xhci_slot_ctx *slot_ctx; @@ -1599,6 +1603,10 @@ static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id) if (xhci->quirks & XHCI_EP_LIMIT_QUIRK) /* Delete default control endpoint resources */ xhci_free_device_endpoint_resources(xhci, virt_dev, true); + if (cmd_comp_code == COMP_SUCCESS) { + xhci->dcbaa->dev_context_ptrs[slot_id] = 0; + xhci->devs[slot_id] = NULL; + } }
static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id, @@ -1847,7 +1855,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code); break; case TRB_DISABLE_SLOT: - xhci_handle_cmd_disable_slot(xhci, slot_id); + xhci_handle_cmd_disable_slot(xhci, slot_id, cmd_comp_code); break; case TRB_CONFIG_EP: if (!cmd->completion) @@ -4454,7 +4462,8 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
if ((xhci->xhc_state & XHCI_STATE_DYING) || (xhci->xhc_state & XHCI_STATE_HALTED)) { - xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n"); + xhci_dbg(xhci, "xHCI dying or halted, can't queue_command. state: 0x%x\n", + xhci->xhc_state); return -ESHUTDOWN; }
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index ce38cd2435c8..8fd88fedbb30 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -119,7 +119,8 @@ int xhci_halt(struct xhci_hcd *xhci) ret = xhci_handshake(&xhci->op_regs->status, STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC); if (ret) { - xhci_warn(xhci, "Host halt failed, %d\n", ret); + if (!(xhci->xhc_state & XHCI_STATE_DYING)) + xhci_warn(xhci, "Host halt failed, %d\n", ret); return ret; }
@@ -178,7 +179,8 @@ int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us) state = readl(&xhci->op_regs->status);
if (state == ~(u32)0) { - xhci_warn(xhci, "Host not accessible, reset failed.\n"); + if (!(xhci->xhc_state & XHCI_STATE_DYING)) + xhci_warn(xhci, "Host not accessible, reset failed.\n"); return -ENODEV; }
@@ -3758,8 +3760,7 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd, * Obtaining a new device slot to inform the xHCI host that * the USB device has been reset. */ - ret = xhci_disable_slot(xhci, udev->slot_id); - xhci_free_virt_device(xhci, udev->slot_id); + ret = xhci_disable_and_free_slot(xhci, udev->slot_id); if (!ret) { ret = xhci_alloc_dev(hcd, udev); if (ret == 1) @@ -3914,7 +3915,7 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) xhci_disable_slot(xhci, udev->slot_id);
spin_lock_irqsave(&xhci->lock, flags); - xhci_free_virt_device(xhci, udev->slot_id); + xhci_free_virt_device(xhci, virt_dev, udev->slot_id); spin_unlock_irqrestore(&xhci->lock, flags);
} @@ -3963,6 +3964,16 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) return 0; }
+int xhci_disable_and_free_slot(struct xhci_hcd *xhci, u32 slot_id) +{ + struct xhci_virt_device *vdev = xhci->devs[slot_id]; + int ret; + + ret = xhci_disable_slot(xhci, slot_id); + xhci_free_virt_device(xhci, vdev, slot_id); + return ret; +} + /* * Checks if we have enough host controller resources for the default control * endpoint. @@ -4069,8 +4080,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) return 1;
disable_slot: - xhci_disable_slot(xhci, udev->slot_id); - xhci_free_virt_device(xhci, udev->slot_id); + xhci_disable_and_free_slot(xhci, udev->slot_id);
return 0; } @@ -4206,8 +4216,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
mutex_unlock(&xhci->mutex); - ret = xhci_disable_slot(xhci, udev->slot_id); - xhci_free_virt_device(xhci, udev->slot_id); + ret = xhci_disable_and_free_slot(xhci, udev->slot_id); if (!ret) { if (xhci_alloc_dev(hcd, udev) == 1) xhci_setup_addressable_virt_dev(xhci, udev); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 159cdfc71290..808f2ee43b94 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1798,7 +1798,7 @@ void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *), /* xHCI memory management */ void xhci_mem_cleanup(struct xhci_hcd *xhci); int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags); -void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id); +void xhci_free_virt_device(struct xhci_hcd *xhci, struct xhci_virt_device *dev, int slot_id); int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags); int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev); void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci, @@ -1895,6 +1895,7 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, struct usb_tt *tt, gfp_t mem_flags); int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id); +int xhci_disable_and_free_slot(struct xhci_hcd *xhci, u32 slot_id); int xhci_ext_cap_init(struct xhci_hcd *xhci);
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup); diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index b4a4c1df4e0d..a4668c6d575d 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c @@ -400,7 +400,7 @@ static int omap2430_probe(struct platform_device *pdev) ret = platform_device_add_resources(musb, pdev->resource, pdev->num_resources); if (ret) { dev_err(&pdev->dev, "failed to add resources\n"); - goto err2; + goto err_put_control_otghs; }
if (populate_irqs) { @@ -413,7 +413,7 @@ static int omap2430_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { ret = -EINVAL; - goto err2; + goto err_put_control_otghs; }
musb_res[i].start = res->start; @@ -441,14 +441,14 @@ static int omap2430_probe(struct platform_device *pdev) ret = platform_device_add_resources(musb, musb_res, i); if (ret) { dev_err(&pdev->dev, "failed to add IRQ resources\n"); - goto err2; + goto err_put_control_otghs; } }
ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); if (ret) { dev_err(&pdev->dev, "failed to add platform_data\n"); - goto err2; + goto err_put_control_otghs; }
pm_runtime_enable(glue->dev); @@ -463,7 +463,9 @@ static int omap2430_probe(struct platform_device *pdev)
err3: pm_runtime_disable(glue->dev); - +err_put_control_otghs: + if (!IS_ERR(glue->control_otghs)) + put_device(glue->control_otghs); err2: platform_device_put(musb);
@@ -477,6 +479,8 @@ static void omap2430_remove(struct platform_device *pdev)
platform_device_unregister(glue->musb); pm_runtime_disable(glue->dev); + if (!IS_ERR(glue->control_otghs)) + put_device(glue->control_otghs); }
#ifdef CONFIG_PM diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c index 0c423916d7bf..a026c6cb6e68 100644 --- a/drivers/usb/storage/realtek_cr.c +++ b/drivers/usb/storage/realtek_cr.c @@ -252,7 +252,7 @@ static int rts51x_bulk_transport(struct us_data *us, u8 lun, return USB_STOR_TRANSPORT_ERROR; }
- residue = bcs->Residue; + residue = le32_to_cpu(bcs->Residue); if (bcs->Tag != us->tag) return USB_STOR_TRANSPORT_ERROR;
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 54f0b1c83317..dfa5276a5a43 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -934,6 +934,13 @@ UNUSUAL_DEV( 0x05e3, 0x0723, 0x9451, 0x9451, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_SANE_SENSE ),
+/* Added by Maël GUERIN <mael.guerin@murena.io> */ +UNUSUAL_DEV( 0x0603, 0x8611, 0x0000, 0xffff, + "Novatek", + "NTK96550-based camera", + USB_SC_SCSI, USB_PR_BULK, NULL, + US_FL_BULK_IGNORE_TAG ), + /* * Reported by Hanno Boeck <hanno@gmx.de> * Taken from the Lycoris Kernel @@ -1494,6 +1501,28 @@ UNUSUAL_DEV( 0x0bc2, 0x3332, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_WP_DETECT ),
+/* + * Reported by Zenm Chen <zenmchen@gmail.com> + * Ignore driver CD mode, otherwise usb_modeswitch may fail to switch + * the device into Wi-Fi mode. + */ +UNUSUAL_DEV( 0x0bda, 0x1a2b, 0x0000, 0xffff, + "Realtek", + "DISK", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_DEVICE ), + +/* + * Reported by Zenm Chen <zenmchen@gmail.com> + * Ignore driver CD mode, otherwise usb_modeswitch may fail to switch + * the device into Wi-Fi mode. + */ +UNUSUAL_DEV( 0x0bda, 0xa192, 0x0000, 0xffff, + "Realtek", + "DISK", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_DEVICE ), + UNUSUAL_DEV( 0x0d49, 0x7310, 0x0000, 0x9999, "Maxtor", "USB to SATA", diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c index 60ed1f809130..a174ff7a9abd 100644 --- a/drivers/usb/typec/mux/intel_pmc_mux.c +++ b/drivers/usb/typec/mux/intel_pmc_mux.c @@ -730,7 +730,7 @@ static int pmc_usb_probe(struct platform_device *pdev)
pmc->ipc = devm_intel_scu_ipc_dev_get(&pdev->dev); if (!pmc->ipc) - return -ENODEV; + return -EPROBE_DEFER;
pmc->dev = &pdev->dev;
diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c index bc21006e979c..03749a392fdb 100644 --- a/drivers/usb/typec/tcpm/fusb302.c +++ b/drivers/usb/typec/tcpm/fusb302.c @@ -103,6 +103,7 @@ struct fusb302_chip { bool vconn_on; bool vbus_on; bool charge_on; + bool pd_rx_on; bool vbus_present; enum typec_cc_polarity cc_polarity; enum typec_cc_status cc1; @@ -841,6 +842,11 @@ static int tcpm_set_pd_rx(struct tcpc_dev *dev, bool on) int ret = 0;
mutex_lock(&chip->lock); + if (chip->pd_rx_on == on) { + fusb302_log(chip, "pd is already %s", str_on_off(on)); + goto done; + } + ret = fusb302_pd_rx_flush(chip); if (ret < 0) { fusb302_log(chip, "cannot flush pd rx buffer, ret=%d", ret); @@ -863,6 +869,8 @@ static int tcpm_set_pd_rx(struct tcpc_dev *dev, bool on) on ? "on" : "off", ret); goto done; } + + chip->pd_rx_on = on; fusb302_log(chip, "pd := %s", on ? "on" : "off"); done: mutex_unlock(&chip->lock); diff --git a/drivers/usb/typec/tcpm/maxim_contaminant.c b/drivers/usb/typec/tcpm/maxim_contaminant.c index 60f90272fed3..e37bf26cca80 100644 --- a/drivers/usb/typec/tcpm/maxim_contaminant.c +++ b/drivers/usb/typec/tcpm/maxim_contaminant.c @@ -5,6 +5,7 @@ * USB-C module to reduce wakeups due to contaminants. */
+#include <linux/bitfield.h> #include <linux/device.h> #include <linux/irqreturn.h> #include <linux/module.h> @@ -189,6 +190,11 @@ static int max_contaminant_read_comparators(struct max_tcpci_chip *chip, u8 *ven if (ret < 0) return ret;
+ /* Disable low power mode */ + ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCLPMODESEL_MASK, + FIELD_PREP(CCLPMODESEL_MASK, + LOW_POWER_MODE_DISABLE)); + /* Sleep to allow comparators settle */ usleep_range(5000, 6000); ret = regmap_update_bits(regmap, TCPC_TCPC_CTRL, TCPC_TCPC_CTRL_ORIENTATION, PLUG_ORNT_CC1); @@ -322,6 +328,34 @@ static int max_contaminant_enable_dry_detection(struct max_tcpci_chip *chip) return 0; }
+static int max_contaminant_enable_toggling(struct max_tcpci_chip *chip) +{ + struct regmap *regmap = chip->data.regmap; + int ret; + + /* Disable dry detection if enabled. */ + ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCLPMODESEL_MASK, + ULTRA_LOW_POWER_MODE); + if (ret) + return ret; + + ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL1, CCCONNDRY, 0); + if (ret) + return ret; + + ret = max_tcpci_write8(chip, TCPC_ROLE_CTRL, TCPC_ROLE_CTRL_DRP | 0xA); + if (ret) + return ret; + + ret = regmap_update_bits(regmap, TCPC_TCPC_CTRL, + TCPC_TCPC_CTRL_EN_LK4CONN_ALRT, + TCPC_TCPC_CTRL_EN_LK4CONN_ALRT); + if (ret) + return ret; + + return max_tcpci_write8(chip, TCPC_COMMAND, TCPC_CMD_LOOK4CONNECTION); +} + bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect_while_debounce) { u8 cc_status, pwr_cntl; @@ -335,6 +369,12 @@ bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect if (ret < 0) return false;
+ if (cc_status & TCPC_CC_STATUS_TOGGLING) { + if (chip->contaminant_state == DETECTED) + return true; + return false; + } + if (chip->contaminant_state == NOT_DETECTED || chip->contaminant_state == SINK) { if (!disconnect_while_debounce) msleep(100); @@ -367,6 +407,12 @@ bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect max_contaminant_enable_dry_detection(chip); return true; } + + ret = max_contaminant_enable_toggling(chip); + if (ret) + dev_err(chip->dev, + "Failed to enable toggling, ret=%d", + ret); } return false; } else if (chip->contaminant_state == DETECTED) { @@ -375,6 +421,14 @@ bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect if (chip->contaminant_state == DETECTED) { max_contaminant_enable_dry_detection(chip); return true; + } else { + ret = max_contaminant_enable_toggling(chip); + if (ret) { + dev_err(chip->dev, + "Failed to enable toggling, ret=%d", + ret); + return true; + } } } } diff --git a/drivers/usb/typec/tcpm/tcpci_maxim.h b/drivers/usb/typec/tcpm/tcpci_maxim.h index 2c1c4d161b0d..861801cc456f 100644 --- a/drivers/usb/typec/tcpm/tcpci_maxim.h +++ b/drivers/usb/typec/tcpm/tcpci_maxim.h @@ -21,6 +21,7 @@ #define CCOVPDIS BIT(6) #define SBURPCTRL BIT(5) #define CCLPMODESEL_MASK GENMASK(4, 3) +#define LOW_POWER_MODE_DISABLE 0 #define ULTRA_LOW_POWER_MODE BIT(3) #define CCRPCTRL_MASK GENMASK(2, 0) #define UA_1_SRC 1 diff --git a/drivers/usb/typec/ucsi/psy.c b/drivers/usb/typec/ucsi/psy.c index b35c6e07911e..9b0157063df0 100644 --- a/drivers/usb/typec/ucsi/psy.c +++ b/drivers/usb/typec/ucsi/psy.c @@ -163,7 +163,7 @@ static int ucsi_psy_get_current_max(struct ucsi_connector *con, case UCSI_CONSTAT_PWR_OPMODE_DEFAULT: /* UCSI can't tell b/w DCP/CDP or USB2/3x1/3x2 SDP chargers */ default: - val->intval = 0; + val->intval = UCSI_TYPEC_DEFAULT_CURRENT * 1000; break; } return 0; diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index ea98bc567494..e5c001ee0cd7 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -910,6 +910,7 @@ static void ucsi_handle_connector_change(struct work_struct *work)
if (con->status.change & UCSI_CONSTAT_POWER_DIR_CHANGE) { typec_set_pwr_role(con->port, role); + ucsi_port_psy_changed(con);
/* Complete pending power role swap */ if (!completion_done(&con->complete)) diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h index 7706f4e95125..51e745117dcb 100644 --- a/drivers/usb/typec/ucsi/ucsi.h +++ b/drivers/usb/typec/ucsi/ucsi.h @@ -340,9 +340,10 @@ struct ucsi { #define UCSI_MAX_SVID 5 #define UCSI_MAX_ALTMODES (UCSI_MAX_SVID * 6)
-#define UCSI_TYPEC_VSAFE5V 5000 -#define UCSI_TYPEC_1_5_CURRENT 1500 -#define UCSI_TYPEC_3_0_CURRENT 3000 +#define UCSI_TYPEC_VSAFE5V 5000 +#define UCSI_TYPEC_DEFAULT_CURRENT 100 +#define UCSI_TYPEC_1_5_CURRENT 1500 +#define UCSI_TYPEC_3_0_CURRENT 3000
struct ucsi_connector { int num; diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c index 2d996c913ecd..82558fa7712e 100644 --- a/drivers/vfio/pci/mlx5/cmd.c +++ b/drivers/vfio/pci/mlx5/cmd.c @@ -1389,8 +1389,8 @@ int mlx5vf_start_page_tracker(struct vfio_device *vdev, log_max_msg_size = MLX5_CAP_ADV_VIRTUALIZATION(mdev, pg_track_log_max_msg_size); max_msg_size = (1ULL << log_max_msg_size); /* The RQ must hold at least 4 WQEs/messages for successful QP creation */ - if (rq_size < 4 * max_msg_size) - rq_size = 4 * max_msg_size; + if (rq_size < 4ULL * max_msg_size) + rq_size = 4ULL * max_msg_size;
memset(tracker, 0, sizeof(*tracker)); tracker->uar = mlx5_get_uars_page(mdev); diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 5fe7aed3672e..f63f116b9cd0 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -635,6 +635,13 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
while (npage) { if (!batch->size) { + /* + * Large mappings may take a while to repeatedly refill + * the batch, so conditionally relinquish the CPU when + * needed to avoid stalls. + */ + cond_resched(); + /* Empty batch, so refill it. */ long req_pages = min_t(long, npage, batch->capacity);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index d0238bd741b0..147cfb64bba2 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -2770,6 +2770,9 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, } r = __vhost_add_used_n(vq, heads, count);
+ if (r < 0) + return r; + /* Make sure buffer is written before we update index. */ smp_wmb(); if (vhost_put_used_idx(vq)) { diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index d94a06008ff6..2ec0d5260685 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -340,6 +340,9 @@ vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
len = iov_length(vq->iov, out);
+ if (len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM) + return NULL; + /* len contains both payload and hdr */ skb = virtio_vsock_alloc_skb(len, GFP_KERNEL); if (!skb) @@ -363,8 +366,7 @@ vhost_vsock_alloc_skb(struct vhost_virtqueue *vq, return skb;
/* The pkt is too big or the length in the header is invalid */ - if (payload_len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE || - payload_len + sizeof(*hdr) > len) { + if (payload_len + sizeof(*hdr) > len) { kfree_skb(skb); return NULL; } diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index bbc362db40c5..14e5312a0030 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -1139,7 +1139,7 @@ static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b, c->vc_screenbuf_size - delta); c->vc_origin = vga_vram_end - c->vc_screenbuf_size; vga_rolled_over = 0; - } else if (oldo - delta >= (unsigned long)c->vc_screenbuf) + } else c->vc_origin -= delta; c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size; scr_memsetw((u16 *) (c->vc_origin), c->vc_video_erase_char, diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index ed68ba89b80b..58eee27aa6cc 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -808,7 +808,8 @@ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info, fg_vc->vc_rows); }
- update_screen(vc_cons[fg_console].d); + if (fg_console != unit) + update_screen(vc_cons[fg_console].d); }
/** @@ -1353,6 +1354,7 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, struct vc_data *svc; struct fbcon_ops *ops = info->fbcon_par; int rows, cols; + unsigned long ret = 0;
p = &fb_display[unit];
@@ -1403,11 +1405,10 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); cols /= vc->vc_font.width; rows /= vc->vc_font.height; - vc_resize(vc, cols, rows); + ret = vc_resize(vc, cols, rows);
- if (con_is_visible(vc)) { + if (con_is_visible(vc) && !ret) update_screen(vc); - } }
static __inline__ void ywrap_up(struct vc_data *vc, int count) diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 52bd3af54369..942b942f6bf9 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -943,6 +943,9 @@ static int do_register_framebuffer(struct fb_info *fb_info) if (!registered_fb[i]) break;
+ if (i >= FB_MAX) + return -ENXIO; + if (!fb_info->modelist.prev || !fb_info->modelist.next) INIT_LIST_HEAD(&fb_info->modelist);
diff --git a/drivers/virt/coco/efi_secret/efi_secret.c b/drivers/virt/coco/efi_secret/efi_secret.c index e700a5ef7043..d996feb0509a 100644 --- a/drivers/virt/coco/efi_secret/efi_secret.c +++ b/drivers/virt/coco/efi_secret/efi_secret.c @@ -136,15 +136,7 @@ static int efi_secret_unlink(struct inode *dir, struct dentry *dentry) if (s->fs_files[i] == dentry) s->fs_files[i] = NULL;
- /* - * securityfs_remove tries to lock the directory's inode, but we reach - * the unlink callback when it's already locked - */ - inode_unlock(dir); - securityfs_remove(dentry); - inode_lock(dir); - - return 0; + return simple_unlink(inode, dentry); }
static const struct inode_operations efi_secret_dir_inode_operations = { diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c index 84dca3695f86..e5e6d7f15918 100644 --- a/drivers/watchdog/dw_wdt.c +++ b/drivers/watchdog/dw_wdt.c @@ -644,6 +644,8 @@ static int dw_wdt_drv_probe(struct platform_device *pdev) } else { wdd->timeout = DW_WDT_DEFAULT_SECONDS; watchdog_init_timeout(wdd, 0, dev); + /* Limit timeout value to hardware constraints. */ + dw_wdt_set_timeout(wdd, wdd->timeout); }
platform_set_drvdata(pdev, dw_wdt); diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c index dd297dcd524c..68973be2ce62 100644 --- a/drivers/watchdog/iTCO_wdt.c +++ b/drivers/watchdog/iTCO_wdt.c @@ -601,7 +601,11 @@ static int iTCO_wdt_probe(struct platform_device *pdev) /* Check that the heartbeat value is within it's range; if not reset to the default */ if (iTCO_wdt_set_timeout(&p->wddev, heartbeat)) { - iTCO_wdt_set_timeout(&p->wddev, WATCHDOG_TIMEOUT); + ret = iTCO_wdt_set_timeout(&p->wddev, WATCHDOG_TIMEOUT); + if (ret != 0) { + dev_err(dev, "Failed to set watchdog timeout (%d)\n", WATCHDOG_TIMEOUT); + return ret; + } dev_info(dev, "timeout value out of range, using %d\n", WATCHDOG_TIMEOUT); } diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c index 5f23913ce3b4..6ce1bfb39064 100644 --- a/drivers/watchdog/sbsa_gwdt.c +++ b/drivers/watchdog/sbsa_gwdt.c @@ -75,11 +75,17 @@ #define SBSA_GWDT_VERSION_MASK 0xF #define SBSA_GWDT_VERSION_SHIFT 16
+#define SBSA_GWDT_IMPL_MASK 0x7FF +#define SBSA_GWDT_IMPL_SHIFT 0 +#define SBSA_GWDT_IMPL_MEDIATEK 0x426 + /** * struct sbsa_gwdt - Internal representation of the SBSA GWDT * @wdd: kernel watchdog_device structure * @clk: store the System Counter clock frequency, in Hz. * @version: store the architecture version + * @need_ws0_race_workaround: + * indicate whether to adjust wdd->timeout to avoid a race with WS0 * @refresh_base: Virtual address of the watchdog refresh frame * @control_base: Virtual address of the watchdog control frame */ @@ -87,6 +93,7 @@ struct sbsa_gwdt { struct watchdog_device wdd; u32 clk; int version; + bool need_ws0_race_workaround; void __iomem *refresh_base; void __iomem *control_base; }; @@ -161,6 +168,31 @@ static int sbsa_gwdt_set_timeout(struct watchdog_device *wdd, */ sbsa_gwdt_reg_write(((u64)gwdt->clk / 2) * timeout, gwdt);
+ /* + * Some watchdog hardware has a race condition where it will ignore + * sbsa_gwdt_keepalive() if it is called at the exact moment that a + * timeout occurs and WS0 is being asserted. Unfortunately, the default + * behavior of the watchdog core is very likely to trigger this race + * when action=0 because it programs WOR to be half of the desired + * timeout, and watchdog_next_keepalive() chooses the exact same time to + * send keepalive pings. + * + * This triggers a race where sbsa_gwdt_keepalive() can be called right + * as WS0 is being asserted, and affected hardware will ignore that + * write and continue to assert WS0. After another (timeout / 2) + * seconds, the same race happens again. If the driver wins then the + * explicit refresh will reset WS0 to false but if the hardware wins, + * then WS1 is asserted and the system resets. + * + * Avoid the problem by scheduling keepalive heartbeats one second later + * than the WOR timeout. + * + * This workaround might not be needed in a future revision of the + * hardware. + */ + if (gwdt->need_ws0_race_workaround) + wdd->min_hw_heartbeat_ms = timeout * 500 + 1000; + return 0; }
@@ -202,12 +234,15 @@ static int sbsa_gwdt_keepalive(struct watchdog_device *wdd) static void sbsa_gwdt_get_version(struct watchdog_device *wdd) { struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd); - int ver; + int iidr, ver, impl;
- ver = readl(gwdt->control_base + SBSA_GWDT_W_IIDR); - ver = (ver >> SBSA_GWDT_VERSION_SHIFT) & SBSA_GWDT_VERSION_MASK; + iidr = readl(gwdt->control_base + SBSA_GWDT_W_IIDR); + ver = (iidr >> SBSA_GWDT_VERSION_SHIFT) & SBSA_GWDT_VERSION_MASK; + impl = (iidr >> SBSA_GWDT_IMPL_SHIFT) & SBSA_GWDT_IMPL_MASK;
gwdt->version = ver; + gwdt->need_ws0_race_workaround = + !action && (impl == SBSA_GWDT_IMPL_MEDIATEK); }
static int sbsa_gwdt_start(struct watchdog_device *wdd) @@ -299,6 +334,15 @@ static int sbsa_gwdt_probe(struct platform_device *pdev) else wdd->max_hw_heartbeat_ms = GENMASK_ULL(47, 0) / gwdt->clk * 1000;
+ if (gwdt->need_ws0_race_workaround) { + /* + * A timeout of 3 seconds means that WOR will be set to 1.5 + * seconds and the heartbeat will be scheduled every 2.5 + * seconds. + */ + wdd->min_timeout = 3; + } + status = readl(cf_base + SBSA_GWDT_WCS); if (status & SBSA_GWDT_WCS_WS1) { dev_warn(dev, "System reset by WDT.\n"); diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index a2ba1c7fc16a..10f803d0534e 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -222,8 +222,8 @@ static void free_pref(struct prelim_ref *ref) * A -1 return indicates ref1 is a 'lower' block than ref2, while 1 * indicates a 'higher' block. */ -static int prelim_ref_compare(struct prelim_ref *ref1, - struct prelim_ref *ref2) +static int prelim_ref_compare(const struct prelim_ref *ref1, + const struct prelim_ref *ref2) { if (ref1->level < ref2->level) return -1; @@ -254,7 +254,7 @@ static int prelim_ref_compare(struct prelim_ref *ref1, }
static void update_share_count(struct share_check *sc, int oldcount, - int newcount, struct prelim_ref *newref) + int newcount, const struct prelim_ref *newref) { if ((!sc) || (oldcount == 0 && newcount < 1)) return; diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 226e6434a58a..5a3a41c6d509 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -23,7 +23,7 @@ #include "extent-tree.h"
#ifdef CONFIG_BTRFS_DEBUG -int btrfs_should_fragment_free_space(struct btrfs_block_group *block_group) +int btrfs_should_fragment_free_space(const struct btrfs_block_group *block_group) { struct btrfs_fs_info *fs_info = block_group->fs_info;
@@ -34,15 +34,28 @@ int btrfs_should_fragment_free_space(struct btrfs_block_group *block_group) } #endif
+static inline bool has_unwritten_metadata(struct btrfs_block_group *block_group) +{ + /* The meta_write_pointer is available only on the zoned setup. */ + if (!btrfs_is_zoned(block_group->fs_info)) + return false; + + if (block_group->flags & BTRFS_BLOCK_GROUP_DATA) + return false; + + return block_group->start + block_group->alloc_offset > + block_group->meta_write_pointer; +} + /* * Return target flags in extended format or 0 if restripe for this chunk_type * is not in progress * * Should be called with balance_lock held */ -static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags) +static u64 get_restripe_target(const struct btrfs_fs_info *fs_info, u64 flags) { - struct btrfs_balance_control *bctl = fs_info->balance_ctl; + const struct btrfs_balance_control *bctl = fs_info->balance_ctl; u64 target = 0;
if (!bctl) @@ -1240,6 +1253,15 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, goto out;
spin_lock(&block_group->lock); + /* + * Hitting this WARN means we removed a block group with an unwritten + * region. It will cause "unable to find chunk map for logical" errors. + */ + if (WARN_ON(has_unwritten_metadata(block_group))) + btrfs_warn(fs_info, + "block group %llu is removed before metadata write out", + block_group->start); + set_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags);
/* @@ -1418,9 +1440,9 @@ static int inc_block_group_ro(struct btrfs_block_group *cache, int force) }
static bool clean_pinned_extents(struct btrfs_trans_handle *trans, - struct btrfs_block_group *bg) + const struct btrfs_block_group *bg) { - struct btrfs_fs_info *fs_info = bg->fs_info; + struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_transaction *prev_trans = NULL; const u64 start = bg->start; const u64 end = start + bg->length - 1; @@ -1563,8 +1585,9 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) * needing to allocate extents from the block group. */ used = btrfs_space_info_used(space_info, true); - if (space_info->total_bytes - block_group->length < used && - block_group->zone_unusable < block_group->length) { + if ((space_info->total_bytes - block_group->length < used && + block_group->zone_unusable < block_group->length) || + has_unwritten_metadata(block_group)) { /* * Add a reference for the list, compensate for the ref * drop under the "next" label for the @@ -1752,14 +1775,14 @@ static int reclaim_bgs_cmp(void *unused, const struct list_head *a, return bg1->used > bg2->used; }
-static inline bool btrfs_should_reclaim(struct btrfs_fs_info *fs_info) +static inline bool btrfs_should_reclaim(const struct btrfs_fs_info *fs_info) { if (btrfs_is_zoned(fs_info)) return btrfs_zoned_should_reclaim(fs_info); return true; }
-static bool should_reclaim_block_group(struct btrfs_block_group *bg, u64 bytes_freed) +static bool should_reclaim_block_group(const struct btrfs_block_group *bg, u64 bytes_freed) { const struct btrfs_space_info *space_info = bg->space_info; const int reclaim_thresh = READ_ONCE(space_info->bg_reclaim_threshold); @@ -1991,8 +2014,8 @@ void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg) spin_unlock(&fs_info->unused_bgs_lock); }
-static int read_bg_from_eb(struct btrfs_fs_info *fs_info, struct btrfs_key *key, - struct btrfs_path *path) +static int read_bg_from_eb(struct btrfs_fs_info *fs_info, const struct btrfs_key *key, + const struct btrfs_path *path) { struct extent_map_tree *em_tree; struct extent_map *em; @@ -2044,7 +2067,7 @@ static int read_bg_from_eb(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
static int find_first_block_group(struct btrfs_fs_info *fs_info, struct btrfs_path *path, - struct btrfs_key *key) + const struct btrfs_key *key) { struct btrfs_root *root = btrfs_block_group_root(fs_info); int ret; @@ -2636,8 +2659,8 @@ static int insert_block_group_item(struct btrfs_trans_handle *trans, }
static int insert_dev_extent(struct btrfs_trans_handle *trans, - struct btrfs_device *device, u64 chunk_offset, - u64 start, u64 num_bytes) + const struct btrfs_device *device, u64 chunk_offset, + u64 start, u64 num_bytes) { struct btrfs_fs_info *fs_info = device->fs_info; struct btrfs_root *root = fs_info->dev_root; @@ -2787,7 +2810,7 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) * For extent tree v2 we use the block_group_item->chunk_offset to point at our * global root id. For v1 it's always set to BTRFS_FIRST_CHUNK_TREE_OBJECTID. */ -static u64 calculate_global_root_id(struct btrfs_fs_info *fs_info, u64 offset) +static u64 calculate_global_root_id(const struct btrfs_fs_info *fs_info, u64 offset) { u64 div = SZ_1G; u64 index; @@ -3823,8 +3846,8 @@ static void force_metadata_allocation(struct btrfs_fs_info *info) } }
-static int should_alloc_chunk(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *sinfo, int force) +static int should_alloc_chunk(const struct btrfs_fs_info *fs_info, + const struct btrfs_space_info *sinfo, int force) { u64 bytes_used = btrfs_space_info_used(sinfo, false); u64 thresh; @@ -4199,7 +4222,7 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, return ret; }
-static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type) +static u64 get_profile_num_devs(const struct btrfs_fs_info *fs_info, u64 type) { u64 num_dev;
@@ -4606,7 +4629,7 @@ int btrfs_use_block_group_size_class(struct btrfs_block_group *bg, return 0; }
-bool btrfs_block_group_should_use_size_class(struct btrfs_block_group *bg) +bool btrfs_block_group_should_use_size_class(const struct btrfs_block_group *bg) { if (btrfs_is_zoned(bg->fs_info)) return false; diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index 089979981e4a..a8a6a21e393d 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -250,7 +250,7 @@ struct btrfs_block_group { enum btrfs_block_group_size_class size_class; };
-static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group) +static inline u64 btrfs_block_group_end(const struct btrfs_block_group *block_group) { return (block_group->start + block_group->length); } @@ -262,8 +262,7 @@ static inline bool btrfs_is_block_group_used(const struct btrfs_block_group *bg) return (bg->used > 0 || bg->reserved > 0 || bg->pinned > 0); }
-static inline bool btrfs_is_block_group_data_only( - struct btrfs_block_group *block_group) +static inline bool btrfs_is_block_group_data_only(const struct btrfs_block_group *block_group) { /* * In mixed mode the fragmentation is expected to be high, lowering the @@ -274,7 +273,7 @@ static inline bool btrfs_is_block_group_data_only( }
#ifdef CONFIG_BTRFS_DEBUG -int btrfs_should_fragment_free_space(struct btrfs_block_group *block_group); +int btrfs_should_fragment_free_space(const struct btrfs_block_group *block_group); #endif
struct btrfs_block_group *btrfs_lookup_first_block_group( @@ -355,7 +354,7 @@ static inline u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info) return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); }
-static inline int btrfs_block_group_done(struct btrfs_block_group *cache) +static inline int btrfs_block_group_done(const struct btrfs_block_group *cache) { smp_mb(); return cache->cached == BTRFS_CACHE_FINISHED || @@ -372,6 +371,6 @@ enum btrfs_block_group_size_class btrfs_calc_block_group_size_class(u64 size); int btrfs_use_block_group_size_class(struct btrfs_block_group *bg, enum btrfs_block_group_size_class size_class, bool force_wrong_size_class); -bool btrfs_block_group_should_use_size_class(struct btrfs_block_group *bg); +bool btrfs_block_group_should_use_size_class(const struct btrfs_block_group *bg);
#endif /* BTRFS_BLOCK_GROUP_H */ diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c index db8da4e7b228..97084ea3af0c 100644 --- a/fs/btrfs/block-rsv.c +++ b/fs/btrfs/block-rsv.c @@ -547,7 +547,7 @@ struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans, return ERR_PTR(ret); }
-int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info, +int btrfs_check_trunc_cache_free_space(const struct btrfs_fs_info *fs_info, struct btrfs_block_rsv *rsv) { u64 needed_bytes; diff --git a/fs/btrfs/block-rsv.h b/fs/btrfs/block-rsv.h index 43a9a6b5a79f..3c9a15f59731 100644 --- a/fs/btrfs/block-rsv.h +++ b/fs/btrfs/block-rsv.h @@ -82,7 +82,7 @@ void btrfs_release_global_block_rsv(struct btrfs_fs_info *fs_info); struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans, struct btrfs_root *root, u32 blocksize); -int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info, +int btrfs_check_trunc_cache_free_space(const struct btrfs_fs_info *fs_info, struct btrfs_block_rsv *rsv); static inline void btrfs_unuse_block_rsv(struct btrfs_fs_info *fs_info, struct btrfs_block_rsv *block_rsv, diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index c23c56ead6b2..c4968efc3fc4 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -251,7 +251,8 @@ struct btrfs_inode { struct btrfs_delayed_node *delayed_node;
/* File creation time. */ - struct timespec64 i_otime; + u64 i_otime_sec; + u32 i_otime_nsec;
/* Hook into fs_info->delayed_iputs */ struct list_head delayed_iput; diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 4b21ca49b666..31b1b448efc2 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -347,7 +347,14 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
- WARN_ON(btrfs_header_generation(buf) > trans->transid); + if (unlikely(btrfs_header_generation(buf) > trans->transid)) { + btrfs_tree_unlock(cow); + free_extent_buffer(cow); + ret = -EUCLEAN; + btrfs_abort_transaction(trans, ret); + return ret; + } + if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID) ret = btrfs_inc_ref(trans, root, cow, 1); else @@ -2712,7 +2719,7 @@ int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key, * */ static void fixup_low_keys(struct btrfs_trans_handle *trans, - struct btrfs_path *path, + const struct btrfs_path *path, struct btrfs_disk_key *key, int level) { int i; @@ -2742,7 +2749,7 @@ static void fixup_low_keys(struct btrfs_trans_handle *trans, * that the new key won't break the order */ void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans, - struct btrfs_path *path, + const struct btrfs_path *path, const struct btrfs_key *new_key) { struct btrfs_fs_info *fs_info = trans->fs_info; @@ -2808,8 +2815,8 @@ void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans, * is correct, we only need to bother the last key of @left and the first * key of @right. */ -static bool check_sibling_keys(struct extent_buffer *left, - struct extent_buffer *right) +static bool check_sibling_keys(const struct extent_buffer *left, + const struct extent_buffer *right) { struct btrfs_key left_last; struct btrfs_key right_first; @@ -3049,6 +3056,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans, if (ret < 0) { int ret2;
+ btrfs_clear_buffer_dirty(trans, c); ret2 = btrfs_free_tree_block(trans, btrfs_root_id(root), c, 0, 1); if (ret2 < 0) btrfs_abort_transaction(trans, ret2); @@ -3077,7 +3085,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans, * blocknr is the block the key points to. */ static int insert_ptr(struct btrfs_trans_handle *trans, - struct btrfs_path *path, + const struct btrfs_path *path, struct btrfs_disk_key *key, u64 bytenr, int slot, int level) { @@ -4168,7 +4176,7 @@ int btrfs_split_item(struct btrfs_trans_handle *trans, * the front. */ void btrfs_truncate_item(struct btrfs_trans_handle *trans, - struct btrfs_path *path, u32 new_size, int from_end) + const struct btrfs_path *path, u32 new_size, int from_end) { int slot; struct extent_buffer *leaf; @@ -4260,7 +4268,7 @@ void btrfs_truncate_item(struct btrfs_trans_handle *trans, * make the item pointed to by the path bigger, data_size is the added size. */ void btrfs_extend_item(struct btrfs_trans_handle *trans, - struct btrfs_path *path, u32 data_size) + const struct btrfs_path *path, u32 data_size) { int slot; struct extent_buffer *leaf; diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 7df3ed2945b0..834af67fac23 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -521,7 +521,7 @@ int btrfs_previous_item(struct btrfs_root *root, int btrfs_previous_extent_item(struct btrfs_root *root, struct btrfs_path *path, u64 min_objectid); void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans, - struct btrfs_path *path, + const struct btrfs_path *path, const struct btrfs_key *new_key); struct extent_buffer *btrfs_root_node(struct btrfs_root *root); int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, @@ -555,9 +555,9 @@ int btrfs_block_can_be_shared(struct btrfs_trans_handle *trans, int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int level, int slot); void btrfs_extend_item(struct btrfs_trans_handle *trans, - struct btrfs_path *path, u32 data_size); + const struct btrfs_path *path, u32 data_size); void btrfs_truncate_item(struct btrfs_trans_handle *trans, - struct btrfs_path *path, u32 new_size, int from_end); + const struct btrfs_path *path, u32 new_size, int from_end); int btrfs_split_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 32c5f5a8a0e9..c39e39142abf 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -1849,10 +1849,8 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans, btrfs_set_stack_timespec_nsec(&inode_item->ctime, inode_get_ctime(inode).tv_nsec);
- btrfs_set_stack_timespec_sec(&inode_item->otime, - BTRFS_I(inode)->i_otime.tv_sec); - btrfs_set_stack_timespec_nsec(&inode_item->otime, - BTRFS_I(inode)->i_otime.tv_nsec); + btrfs_set_stack_timespec_sec(&inode_item->otime, BTRFS_I(inode)->i_otime_sec); + btrfs_set_stack_timespec_nsec(&inode_item->otime, BTRFS_I(inode)->i_otime_nsec); }
int btrfs_fill_inode(struct inode *inode, u32 *rdev) @@ -1901,10 +1899,8 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev) inode_set_ctime(inode, btrfs_stack_timespec_sec(&inode_item->ctime), btrfs_stack_timespec_nsec(&inode_item->ctime));
- BTRFS_I(inode)->i_otime.tv_sec = - btrfs_stack_timespec_sec(&inode_item->otime); - BTRFS_I(inode)->i_otime.tv_nsec = - btrfs_stack_timespec_nsec(&inode_item->otime); + BTRFS_I(inode)->i_otime_sec = btrfs_stack_timespec_sec(&inode_item->otime); + BTRFS_I(inode)->i_otime_nsec = btrfs_stack_timespec_nsec(&inode_item->otime);
inode->i_generation = BTRFS_I(inode)->generation; BTRFS_I(inode)->index_cnt = (u64)-1; diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c index 3981c941f5b5..d6eef4bd9e9d 100644 --- a/fs/btrfs/discard.c +++ b/fs/btrfs/discard.c @@ -68,7 +68,7 @@ static int discard_minlen[BTRFS_NR_DISCARD_LISTS] = { };
static struct list_head *get_discard_list(struct btrfs_discard_ctl *discard_ctl, - struct btrfs_block_group *block_group) + const struct btrfs_block_group *block_group) { return &discard_ctl->discard_list[block_group->discard_index]; } @@ -80,7 +80,7 @@ static struct list_head *get_discard_list(struct btrfs_discard_ctl *discard_ctl, * * Check if the file system is writeable and BTRFS_FS_DISCARD_RUNNING is set. */ -static bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl) +static bool btrfs_run_discard_work(const struct btrfs_discard_ctl *discard_ctl) { struct btrfs_fs_info *fs_info = container_of(discard_ctl, struct btrfs_fs_info, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index ef77d4208510..8248113eb067 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3530,6 +3530,21 @@ btrfs_release_block_group(struct btrfs_block_group *cache, btrfs_put_block_group(cache); }
+static bool find_free_extent_check_size_class(const struct find_free_extent_ctl *ffe_ctl, + const struct btrfs_block_group *bg) +{ + if (ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED) + return true; + if (!btrfs_block_group_should_use_size_class(bg)) + return true; + if (ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS) + return true; + if (ffe_ctl->loop >= LOOP_UNSET_SIZE_CLASS && + bg->size_class == BTRFS_BG_SZ_NONE) + return true; + return ffe_ctl->size_class == bg->size_class; +} + /* * Helper function for find_free_extent(). * @@ -3551,7 +3566,8 @@ static int find_free_extent_clustered(struct btrfs_block_group *bg, if (!cluster_bg) goto refill_cluster; if (cluster_bg != bg && (cluster_bg->ro || - !block_group_bits(cluster_bg, ffe_ctl->flags))) + !block_group_bits(cluster_bg, ffe_ctl->flags) || + !find_free_extent_check_size_class(ffe_ctl, cluster_bg))) goto release_cluster;
offset = btrfs_alloc_from_cluster(cluster_bg, last_ptr, @@ -4107,21 +4123,6 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info, return -ENOSPC; }
-static bool find_free_extent_check_size_class(struct find_free_extent_ctl *ffe_ctl, - struct btrfs_block_group *bg) -{ - if (ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED) - return true; - if (!btrfs_block_group_should_use_size_class(bg)) - return true; - if (ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS) - return true; - if (ffe_ctl->loop >= LOOP_UNSET_SIZE_CLASS && - bg->size_class == BTRFS_BG_SZ_NONE) - return true; - return ffe_ctl->size_class == bg->size_class; -} - static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info, struct find_free_extent_ctl *ffe_ctl, struct btrfs_space_info *space_info, diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 45cae356e89b..ea5759a689b9 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -153,7 +153,7 @@ static inline u32 max_ordered_sum_bytes(const struct btrfs_fs_info *fs_info) * Calculate the total size needed to allocate for an ordered sum structure * spanning @bytes in the file. */ -static int btrfs_ordered_sum_size(struct btrfs_fs_info *fs_info, unsigned long bytes) +static int btrfs_ordered_sum_size(const struct btrfs_fs_info *fs_info, unsigned long bytes) { return sizeof(struct btrfs_ordered_sum) + bytes_to_csum_size(fs_info, bytes); } @@ -1263,7 +1263,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode, const struct btrfs_path *path, - struct btrfs_file_extent_item *fi, + const struct btrfs_file_extent_item *fi, struct extent_map *em) { struct btrfs_fs_info *fs_info = inode->root->fs_info; diff --git a/fs/btrfs/file-item.h b/fs/btrfs/file-item.h index 04bd2d34efb1..2b1d08b88b61 100644 --- a/fs/btrfs/file-item.h +++ b/fs/btrfs/file-item.h @@ -62,7 +62,7 @@ int btrfs_lookup_csums_bitmap(struct btrfs_root *root, struct btrfs_path *path, unsigned long *csum_bitmap); void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode, const struct btrfs_path *path, - struct btrfs_file_extent_item *fi, + const struct btrfs_file_extent_item *fi, struct extent_map *em); int btrfs_inode_clear_file_extent_range(struct btrfs_inode *inode, u64 start, u64 len); diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c index 300ee0b68b49..8efe3a9369df 100644 --- a/fs/btrfs/free-space-tree.c +++ b/fs/btrfs/free-space-tree.c @@ -1371,12 +1371,17 @@ static int __add_block_group_free_space(struct btrfs_trans_handle *trans, clear_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &block_group->runtime_flags);
ret = add_new_free_space_info(trans, block_group, path); - if (ret) + if (ret) { + btrfs_abort_transaction(trans, ret); return ret; + } + + ret = __add_to_free_space_tree(trans, block_group, path, + block_group->start, block_group->length); + if (ret) + btrfs_abort_transaction(trans, ret);
- return __add_to_free_space_tree(trans, block_group, path, - block_group->start, - block_group->length); + return 0; }
int add_block_group_free_space(struct btrfs_trans_handle *trans, @@ -1396,16 +1401,14 @@ int add_block_group_free_space(struct btrfs_trans_handle *trans, path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; + btrfs_abort_transaction(trans, ret); goto out; }
ret = __add_block_group_free_space(trans, block_group, path); - out: btrfs_free_path(path); mutex_unlock(&block_group->free_space_lock); - if (ret) - btrfs_abort_transaction(trans, ret); return ret; }
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c index d3ff97374d48..ab741a578424 100644 --- a/fs/btrfs/inode-item.c +++ b/fs/btrfs/inode-item.c @@ -15,7 +15,7 @@ #include "extent-tree.h" #include "file-item.h"
-struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf, +struct btrfs_inode_ref *btrfs_find_name_in_backref(const struct extent_buffer *leaf, int slot, const struct fscrypt_str *name) { @@ -43,7 +43,7 @@ struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf, }
struct btrfs_inode_extref *btrfs_find_name_in_ext_backref( - struct extent_buffer *leaf, int slot, u64 ref_objectid, + const struct extent_buffer *leaf, int slot, u64 ref_objectid, const struct fscrypt_str *name) { struct btrfs_inode_extref *extref; @@ -424,9 +424,9 @@ int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root return ret; }
-static inline void btrfs_trace_truncate(struct btrfs_inode *inode, - struct extent_buffer *leaf, - struct btrfs_file_extent_item *fi, +static inline void btrfs_trace_truncate(const struct btrfs_inode *inode, + const struct extent_buffer *leaf, + const struct btrfs_file_extent_item *fi, u64 offset, int extent_type, int slot) { if (!inode) diff --git a/fs/btrfs/inode-item.h b/fs/btrfs/inode-item.h index ede43b6c6559..d43633d5620f 100644 --- a/fs/btrfs/inode-item.h +++ b/fs/btrfs/inode-item.h @@ -100,11 +100,11 @@ struct btrfs_inode_extref *btrfs_lookup_inode_extref( u64 inode_objectid, u64 ref_objectid, int ins_len, int cow);
-struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf, +struct btrfs_inode_ref *btrfs_find_name_in_backref(const struct extent_buffer *leaf, int slot, const struct fscrypt_str *name); struct btrfs_inode_extref *btrfs_find_name_in_ext_backref( - struct extent_buffer *leaf, int slot, u64 ref_objectid, + const struct extent_buffer *leaf, int slot, u64 ref_objectid, const struct fscrypt_str *name);
#endif diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 218d15f5ddf7..4502a474a81d 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3785,10 +3785,8 @@ static int btrfs_read_locked_inode(struct inode *inode, inode_set_ctime(inode, btrfs_timespec_sec(leaf, &inode_item->ctime), btrfs_timespec_nsec(leaf, &inode_item->ctime));
- BTRFS_I(inode)->i_otime.tv_sec = - btrfs_timespec_sec(leaf, &inode_item->otime); - BTRFS_I(inode)->i_otime.tv_nsec = - btrfs_timespec_nsec(leaf, &inode_item->otime); + BTRFS_I(inode)->i_otime_sec = btrfs_timespec_sec(leaf, &inode_item->otime); + BTRFS_I(inode)->i_otime_nsec = btrfs_timespec_nsec(leaf, &inode_item->otime);
inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); @@ -3958,10 +3956,8 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, btrfs_set_token_timespec_nsec(&token, &item->ctime, inode_get_ctime(inode).tv_nsec);
- btrfs_set_token_timespec_sec(&token, &item->otime, - BTRFS_I(inode)->i_otime.tv_sec); - btrfs_set_token_timespec_nsec(&token, &item->otime, - BTRFS_I(inode)->i_otime.tv_nsec); + btrfs_set_token_timespec_sec(&token, &item->otime, BTRFS_I(inode)->i_otime_sec); + btrfs_set_token_timespec_nsec(&token, &item->otime, BTRFS_I(inode)->i_otime_nsec);
btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode)); btrfs_set_token_inode_generation(&token, item, @@ -5644,7 +5640,8 @@ static struct inode *new_simple_dir(struct inode *dir, inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; inode->i_mtime = inode_set_ctime_current(inode); inode->i_atime = dir->i_atime; - BTRFS_I(inode)->i_otime = inode->i_mtime; + BTRFS_I(inode)->i_otime_sec = inode->i_mtime.tv_sec; + BTRFS_I(inode)->i_otime_nsec = inode->i_mtime.tv_nsec; inode->i_uid = dir->i_uid; inode->i_gid = dir->i_gid;
@@ -6321,7 +6318,8 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
inode->i_mtime = inode_set_ctime_current(inode); inode->i_atime = inode->i_mtime; - BTRFS_I(inode)->i_otime = inode->i_mtime; + BTRFS_I(inode)->i_otime_sec = inode->i_mtime.tv_sec; + BTRFS_I(inode)->i_otime_nsec = inode->i_mtime.tv_nsec;
/* * We're going to fill the inode item now, so at this point the inode @@ -8550,8 +8548,8 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
ei->delayed_node = NULL;
- ei->i_otime.tv_sec = 0; - ei->i_otime.tv_nsec = 0; + ei->i_otime_sec = 0; + ei->i_otime_nsec = 0;
inode = &ei->vfs_inode; extent_map_tree_init(&ei->extent_tree); @@ -8703,8 +8701,8 @@ static int btrfs_getattr(struct mnt_idmap *idmap, u32 bi_ro_flags = BTRFS_I(inode)->ro_flags;
stat->result_mask |= STATX_BTIME; - stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec; - stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec; + stat->btime.tv_sec = BTRFS_I(inode)->i_otime_sec; + stat->btime.tv_nsec = BTRFS_I(inode)->i_otime_nsec; if (bi_flags & BTRFS_INODE_APPEND) stat->attributes |= STATX_ATTR_APPEND; if (bi_flags & BTRFS_INODE_COMPRESS) diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 1b9f4f16d124..c46ea2ecf188 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -579,22 +579,30 @@ bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info)
/* * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(), - * first two are in single-threaded paths.And for the third one, we have set - * quota_root to be null with qgroup_lock held before, so it is safe to clean - * up the in-memory structures without qgroup_lock held. + * first two are in single-threaded paths. */ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info) { struct rb_node *n; struct btrfs_qgroup *qgroup;
+ /* + * btrfs_quota_disable() can be called concurrently with + * btrfs_qgroup_rescan() -> qgroup_rescan_zero_tracking(), so take the + * lock. + */ + spin_lock(&fs_info->qgroup_lock); while ((n = rb_first(&fs_info->qgroup_tree))) { qgroup = rb_entry(n, struct btrfs_qgroup, node); rb_erase(n, &fs_info->qgroup_tree); __del_qgroup_rb(fs_info, qgroup); + spin_unlock(&fs_info->qgroup_lock); btrfs_sysfs_del_one_qgroup(fs_info, qgroup); kfree(qgroup); + spin_lock(&fs_info->qgroup_lock); } + spin_unlock(&fs_info->qgroup_lock); + /* * We call btrfs_free_qgroup_config() when unmounting * filesystem and disabling quota, so we set qgroup_ulist @@ -3616,12 +3624,21 @@ btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info) qgroup_rescan_zero_tracking(fs_info);
mutex_lock(&fs_info->qgroup_rescan_lock); - fs_info->qgroup_rescan_running = true; - btrfs_queue_work(fs_info->qgroup_rescan_workers, - &fs_info->qgroup_rescan_work); + /* + * The rescan worker is only for full accounting qgroups, check if it's + * enabled as it is pointless to queue it otherwise. A concurrent quota + * disable may also have just cleared BTRFS_FS_QUOTA_ENABLED. + */ + if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) { + fs_info->qgroup_rescan_running = true; + btrfs_queue_work(fs_info->qgroup_rescan_workers, + &fs_info->qgroup_rescan_work); + } else { + ret = -ENOTCONN; + } mutex_unlock(&fs_info->qgroup_rescan_lock);
- return 0; + return ret; }
int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info, diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 474758c878fc..8cc1f4b83277 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -693,6 +693,25 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans, if (root->root_key.objectid == objectid) { u64 commit_root_gen;
+ /* + * Relocation will wait for cleaner thread, and any half-dropped + * subvolume will be fully cleaned up at mount time. + * So here we shouldn't hit a subvolume with non-zero drop_progress. + * + * If this isn't the case, error out since it can make us attempt to + * drop references for extents that were already dropped before. + */ + if (unlikely(btrfs_disk_key_objectid(&root->root_item.drop_progress))) { + struct btrfs_key cpu_key; + + btrfs_disk_key_to_cpu(&cpu_key, &root->root_item.drop_progress); + btrfs_err(fs_info, + "cannot relocate partially dropped subvolume %llu, drop progress key (%llu %u %llu)", + objectid, cpu_key.objectid, cpu_key.type, cpu_key.offset); + ret = -EUCLEAN; + goto fail; + } + /* called by btrfs_init_reloc_root */ ret = btrfs_copy_root(trans, root, root->commit_root, &eb, BTRFS_TREE_RELOC_OBJECTID); diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index e2ead36e5be4..c25eb4416a67 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -4,6 +4,7 @@ */
#include <linux/bsearch.h> +#include <linux/falloc.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/sort.h> @@ -179,6 +180,7 @@ struct send_ctx { u64 cur_inode_rdev; u64 cur_inode_last_extent; u64 cur_inode_next_write_offset; + struct fs_path cur_inode_path; bool cur_inode_new; bool cur_inode_new_gen; bool cur_inode_deleted; @@ -436,6 +438,14 @@ static void fs_path_reset(struct fs_path *p) } }
+static void init_path(struct fs_path *p)
+{
+	p->reversed = 0;
+	p->buf = p->inline_buf;
+	p->buf_len = FS_PATH_INLINE_SIZE;
+	fs_path_reset(p);
+}
+
 static struct fs_path *fs_path_alloc(void)
 {
 	struct fs_path *p;
@@ -443,10 +453,7 @@ static struct fs_path *fs_path_alloc(void)
 	p = kmalloc(sizeof(*p), GFP_KERNEL);
 	if (!p)
 		return NULL;
-	p->reversed = 0;
-	p->buf = p->inline_buf;
-	p->buf_len = FS_PATH_INLINE_SIZE;
-	fs_path_reset(p);
+	init_path(p);
 	return p;
 }
@@ -471,7 +478,7 @@ static void fs_path_free(struct fs_path *p) kfree(p); }
-static int fs_path_len(struct fs_path *p)
+static inline int fs_path_len(const struct fs_path *p)
 {
 	return p->end - p->start;
 }
@@ -624,6 +631,14 @@ static void fs_path_unreverse(struct fs_path *p)
 	p->reversed = 0;
 }
+static inline bool is_current_inode_path(const struct send_ctx *sctx, + const struct fs_path *path) +{ + const struct fs_path *cur = &sctx->cur_inode_path; + + return (strncmp(path->start, cur->start, fs_path_len(cur)) == 0); +} + static struct btrfs_path *alloc_path_for_send(void) { struct btrfs_path *path; @@ -2450,6 +2465,14 @@ static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen, u64 parent_inode = 0; u64 parent_gen = 0; int stop = 0; + const bool is_cur_inode = (ino == sctx->cur_ino && gen == sctx->cur_inode_gen); + + if (is_cur_inode && fs_path_len(&sctx->cur_inode_path) > 0) { + if (dest != &sctx->cur_inode_path) + return fs_path_copy(dest, &sctx->cur_inode_path); + + return 0; + }
name = fs_path_alloc(); if (!name) { @@ -2501,8 +2524,12 @@ static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
out: fs_path_free(name); - if (!ret) + if (!ret) { fs_path_unreverse(dest); + if (is_cur_inode && dest != &sctx->cur_inode_path) + ret = fs_path_copy(&sctx->cur_inode_path, dest); + } + return ret; }
@@ -2597,6 +2624,47 @@ static int send_subvol_begin(struct send_ctx *sctx) return ret; }
+static struct fs_path *get_cur_inode_path(struct send_ctx *sctx) +{ + if (fs_path_len(&sctx->cur_inode_path) == 0) { + int ret; + + ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, + &sctx->cur_inode_path); + if (ret < 0) + return ERR_PTR(ret); + } + + return &sctx->cur_inode_path; +} + +static struct fs_path *get_path_for_command(struct send_ctx *sctx, u64 ino, u64 gen) +{ + struct fs_path *path; + int ret; + + if (ino == sctx->cur_ino && gen == sctx->cur_inode_gen) + return get_cur_inode_path(sctx); + + path = fs_path_alloc(); + if (!path) + return ERR_PTR(-ENOMEM); + + ret = get_cur_path(sctx, ino, gen, path); + if (ret < 0) { + fs_path_free(path); + return ERR_PTR(ret); + } + + return path; +} + +static void free_path_for_command(const struct send_ctx *sctx, struct fs_path *path) +{ + if (path != &sctx->cur_inode_path) + fs_path_free(path); +} + static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size) { struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; @@ -2605,17 +2673,14 @@ static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size);
-	p = fs_path_alloc();
-	if (!p)
-		return -ENOMEM;
+	p = get_path_for_command(sctx, ino, gen);
+	if (IS_ERR(p))
+		return PTR_ERR(p);
ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE); if (ret < 0) goto out;
- ret = get_cur_path(sctx, ino, gen, p); - if (ret < 0) - goto out; TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);
@@ -2623,7 +2688,7 @@ static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
 tlv_put_failure:
 out:
-	fs_path_free(p);
+	free_path_for_command(sctx, p);
 	return ret;
 }
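
The get_path_for_command()/free_path_for_command() pair above amounts to a small caching convention: hand out the per-context cached path when the caller asks about the current inode, otherwise allocate, and only free what was not the cache. A rough userspace sketch of that convention, with invented names and a stand-in lookup:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ctx {
	unsigned long cur_ino;
	char cur_path[256];	/* cached path of the current inode */
};

static char *get_path(struct ctx *c, unsigned long ino)
{
	char *p;

	if (ino == c->cur_ino && c->cur_path[0] != '\0')
		return c->cur_path;	/* reuse the cached copy */

	p = malloc(256);
	if (!p)
		return NULL;
	snprintf(p, 256, "/subvol/inode-%lu", ino);	/* stand-in for a real lookup */
	return p;
}

static void put_path(struct ctx *c, char *p)
{
	if (p != c->cur_path)
		free(p);
}

int main(void)
{
	struct ctx c = { .cur_ino = 7 };
	char *p;

	snprintf(c.cur_path, sizeof(c.cur_path), "/subvol/inode-7");
	p = get_path(&c, 7);		/* cached, no allocation */
	printf("%s\n", p);
	put_path(&c, p);
	p = get_path(&c, 9);		/* freshly allocated */
	printf("%s\n", p);
	put_path(&c, p);
	return 0;
}
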
@@ -2635,17 +2700,14 @@ static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode);
- p = fs_path_alloc(); - if (!p) - return -ENOMEM; + p = get_path_for_command(sctx, ino, gen); + if (IS_ERR(p)) + return PTR_ERR(p);
ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD); if (ret < 0) goto out;
- ret = get_cur_path(sctx, ino, gen, p); - if (ret < 0) - goto out; TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);
@@ -2653,7 +2715,7 @@ static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
tlv_put_failure: out: - fs_path_free(p); + free_path_for_command(sctx, p); return ret; }
@@ -2668,17 +2730,14 @@ static int send_fileattr(struct send_ctx *sctx, u64 ino, u64 gen, u64 fileattr)
btrfs_debug(fs_info, "send_fileattr %llu fileattr=%llu", ino, fileattr);
- p = fs_path_alloc(); - if (!p) - return -ENOMEM; + p = get_path_for_command(sctx, ino, gen); + if (IS_ERR(p)) + return PTR_ERR(p);
ret = begin_cmd(sctx, BTRFS_SEND_C_FILEATTR); if (ret < 0) goto out;
- ret = get_cur_path(sctx, ino, gen, p); - if (ret < 0) - goto out; TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); TLV_PUT_U64(sctx, BTRFS_SEND_A_FILEATTR, fileattr);
@@ -2686,7 +2745,7 @@ static int send_fileattr(struct send_ctx *sctx, u64 ino, u64 gen, u64 fileattr)
tlv_put_failure: out: - fs_path_free(p); + free_path_for_command(sctx, p); return ret; }
@@ -2699,17 +2758,14 @@ static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid) btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu", ino, uid, gid);
- p = fs_path_alloc(); - if (!p) - return -ENOMEM; + p = get_path_for_command(sctx, ino, gen); + if (IS_ERR(p)) + return PTR_ERR(p);
ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN); if (ret < 0) goto out;
- ret = get_cur_path(sctx, ino, gen, p); - if (ret < 0) - goto out; TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid); TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid); @@ -2718,7 +2774,7 @@ static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
tlv_put_failure: out: - fs_path_free(p); + free_path_for_command(sctx, p); return ret; }
@@ -2735,9 +2791,9 @@ static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
btrfs_debug(fs_info, "send_utimes %llu", ino);
- p = fs_path_alloc(); - if (!p) - return -ENOMEM; + p = get_path_for_command(sctx, ino, gen); + if (IS_ERR(p)) + return PTR_ERR(p);
path = alloc_path_for_send(); if (!path) { @@ -2762,9 +2818,6 @@ static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen) if (ret < 0) goto out;
- ret = get_cur_path(sctx, ino, gen, p); - if (ret < 0) - goto out; TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime); TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime); @@ -2776,7 +2829,7 @@ static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
tlv_put_failure: out: - fs_path_free(p); + free_path_for_command(sctx, p); btrfs_free_path(path); return ret; } @@ -3113,6 +3166,11 @@ static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen, goto out;
 	ret = send_rename(sctx, path, orphan);
+	if (ret < 0)
+		goto out;
+
+	if (ino == sctx->cur_ino && gen == sctx->cur_inode_gen)
+		ret = fs_path_copy(&sctx->cur_inode_path, orphan);
out: fs_path_free(orphan); @@ -4166,6 +4224,23 @@ static int refresh_ref_path(struct send_ctx *sctx, struct recorded_ref *ref) return ret; }
+static int rename_current_inode(struct send_ctx *sctx, + struct fs_path *current_path, + struct fs_path *new_path) +{ + int ret; + + ret = send_rename(sctx, current_path, new_path); + if (ret < 0) + return ret; + + ret = fs_path_copy(&sctx->cur_inode_path, new_path); + if (ret < 0) + return ret; + + return fs_path_copy(current_path, new_path); +} + /* * This does all the move/link/unlink/rmdir magic. */ @@ -4180,9 +4255,9 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) u64 ow_inode = 0; u64 ow_gen; u64 ow_mode; - int did_overwrite = 0; - int is_orphan = 0; u64 last_dir_ino_rm = 0; + bool did_overwrite = false; + bool is_orphan = false; bool can_rename = true; bool orphanized_dir = false; bool orphanized_ancestor = false; @@ -4224,14 +4299,14 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) if (ret < 0) goto out; if (ret) - did_overwrite = 1; + did_overwrite = true; } if (sctx->cur_inode_new || did_overwrite) { ret = gen_unique_name(sctx, sctx->cur_ino, sctx->cur_inode_gen, valid_path); if (ret < 0) goto out; - is_orphan = 1; + is_orphan = true; } else { ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, valid_path); @@ -4356,6 +4431,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) if (ret > 0) { orphanized_ancestor = true; fs_path_reset(valid_path); + fs_path_reset(&sctx->cur_inode_path); ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, valid_path); @@ -4451,13 +4527,10 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) * it depending on the inode mode. */ if (is_orphan && can_rename) { - ret = send_rename(sctx, valid_path, cur->full_path); - if (ret < 0) - goto out; - is_orphan = 0; - ret = fs_path_copy(valid_path, cur->full_path); + ret = rename_current_inode(sctx, valid_path, cur->full_path); if (ret < 0) goto out; + is_orphan = false; } else if (can_rename) { if (S_ISDIR(sctx->cur_inode_mode)) { /* @@ -4465,10 +4538,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) * dirs, we always have one new and one deleted * ref. The deleted ref is ignored later. */ - ret = send_rename(sctx, valid_path, - cur->full_path); - if (!ret) - ret = fs_path_copy(valid_path, + ret = rename_current_inode(sctx, valid_path, cur->full_path); if (ret < 0) goto out; @@ -4515,7 +4585,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) sctx->cur_inode_gen, valid_path); if (ret < 0) goto out; - is_orphan = 1; + is_orphan = true; }
list_for_each_entry(cur, &sctx->deleted_refs, list) { @@ -4561,6 +4631,8 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) ret = send_unlink(sctx, cur->full_path); if (ret < 0) goto out; + if (is_current_inode_path(sctx, cur->full_path)) + fs_path_reset(&sctx->cur_inode_path); } ret = dup_ref(cur, &check_dirs); if (ret < 0) @@ -4879,11 +4951,15 @@ static int process_all_refs(struct send_ctx *sctx, }
static int send_set_xattr(struct send_ctx *sctx, - struct fs_path *path, const char *name, int name_len, const char *data, int data_len) { - int ret = 0; + struct fs_path *path; + int ret; + + path = get_cur_inode_path(sctx); + if (IS_ERR(path)) + return PTR_ERR(path);
ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR); if (ret < 0) @@ -4924,19 +5000,13 @@ static int __process_new_xattr(int num, struct btrfs_key *di_key, const char *name, int name_len, const char *data, int data_len, void *ctx) { - int ret; struct send_ctx *sctx = ctx; - struct fs_path *p; struct posix_acl_xattr_header dummy_acl;
/* Capabilities are emitted by finish_inode_if_needed */ if (!strncmp(name, XATTR_NAME_CAPS, name_len)) return 0;
- p = fs_path_alloc(); - if (!p) - return -ENOMEM; - /* * This hack is needed because empty acls are stored as zero byte * data in xattrs. Problem with that is, that receiving these zero byte @@ -4953,38 +5023,21 @@ static int __process_new_xattr(int num, struct btrfs_key *di_key, } }
- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); - if (ret < 0) - goto out; - - ret = send_set_xattr(sctx, p, name, name_len, data, data_len); - -out: - fs_path_free(p); - return ret; + return send_set_xattr(sctx, name, name_len, data, data_len); }
static int __process_deleted_xattr(int num, struct btrfs_key *di_key, const char *name, int name_len, const char *data, int data_len, void *ctx) { - int ret; struct send_ctx *sctx = ctx; struct fs_path *p;
- p = fs_path_alloc(); - if (!p) - return -ENOMEM; - - ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); - if (ret < 0) - goto out; - - ret = send_remove_xattr(sctx, p, name, name_len); + p = get_cur_inode_path(sctx); + if (IS_ERR(p)) + return PTR_ERR(p);
-out: - fs_path_free(p); - return ret; + return send_remove_xattr(sctx, p, name, name_len); }
static int process_new_xattr(struct send_ctx *sctx) @@ -5218,21 +5271,13 @@ static int process_verity(struct send_ctx *sctx) if (ret < 0) goto iput;
- p = fs_path_alloc(); - if (!p) { - ret = -ENOMEM; + p = get_cur_inode_path(sctx); + if (IS_ERR(p)) { + ret = PTR_ERR(p); goto iput; } - ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); - if (ret < 0) - goto free_path;
ret = send_verity(sctx, p, sctx->verity_descriptor); - if (ret < 0) - goto free_path; - -free_path: - fs_path_free(p); iput: iput(inode); return ret; @@ -5347,31 +5392,25 @@ static int send_write(struct send_ctx *sctx, u64 offset, u32 len) int ret = 0; struct fs_path *p;
- p = fs_path_alloc(); - if (!p) - return -ENOMEM; - btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len);
- ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE); - if (ret < 0) - goto out; + p = get_cur_inode_path(sctx); + if (IS_ERR(p)) + return PTR_ERR(p);
- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); + ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE); if (ret < 0) - goto out; + return ret;
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); ret = put_file_data(sctx, offset, len); if (ret < 0) - goto out; + return ret;
ret = send_cmd(sctx);
tlv_put_failure: -out: - fs_path_free(p); return ret; }
@@ -5384,6 +5423,7 @@ static int send_clone(struct send_ctx *sctx, { int ret = 0; struct fs_path *p; + struct fs_path *cur_inode_path; u64 gen;
btrfs_debug(sctx->send_root->fs_info, @@ -5391,6 +5431,10 @@ static int send_clone(struct send_ctx *sctx, offset, len, clone_root->root->root_key.objectid, clone_root->ino, clone_root->offset);
+ cur_inode_path = get_cur_inode_path(sctx); + if (IS_ERR(cur_inode_path)) + return PTR_ERR(cur_inode_path); + p = fs_path_alloc(); if (!p) return -ENOMEM; @@ -5399,13 +5443,9 @@ static int send_clone(struct send_ctx *sctx, if (ret < 0) goto out;
- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); - if (ret < 0) - goto out; - TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len); - TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); + TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, cur_inode_path);
if (clone_root->root == sctx->send_root) { ret = get_inode_gen(sctx->send_root, clone_root->ino, &gen); @@ -5456,27 +5496,45 @@ static int send_update_extent(struct send_ctx *sctx, int ret = 0; struct fs_path *p;
- p = fs_path_alloc(); - if (!p) - return -ENOMEM; + p = get_cur_inode_path(sctx); + if (IS_ERR(p)) + return PTR_ERR(p);
ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT); if (ret < 0) - goto out; + return ret; + + TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); + TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); + TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len); + + ret = send_cmd(sctx);
- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); +tlv_put_failure: + return ret; +} + +static int send_fallocate(struct send_ctx *sctx, u32 mode, u64 offset, u64 len) +{ + struct fs_path *path; + int ret; + + path = get_cur_inode_path(sctx); + if (IS_ERR(path)) + return PTR_ERR(path); + + ret = begin_cmd(sctx, BTRFS_SEND_C_FALLOCATE); if (ret < 0) - goto out; + return ret;
- TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); + TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); + TLV_PUT_U32(sctx, BTRFS_SEND_A_FALLOCATE_MODE, mode); TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
ret = send_cmd(sctx);
tlv_put_failure: -out: - fs_path_free(p); return ret; }
@@ -5487,6 +5545,14 @@ static int send_hole(struct send_ctx *sctx, u64 end) u64 offset = sctx->cur_inode_last_extent; int ret = 0;
+	/*
+	 * Starting with send stream v2 we have fallocate and can use it to
+	 * punch holes instead of sending writes full of zeroes.
+	 */
+	if (proto_cmd_ok(sctx, BTRFS_SEND_C_FALLOCATE))
+		return send_fallocate(sctx, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+				      offset, end - offset);
+
 	/*
 	 * A hole that starts at EOF or beyond it. Since we do not yet support
 	 * fallocate (for extent preallocation and hole punching), sending a
@@ -5505,12 +5571,10 @@ static int send_hole(struct send_ctx *sctx, u64 end)
 	if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
 		return send_update_extent(sctx, offset, end - offset);
- p = fs_path_alloc(); - if (!p) - return -ENOMEM; - ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); - if (ret < 0) - goto tlv_put_failure; + p = get_cur_inode_path(sctx); + if (IS_ERR(p)) + return PTR_ERR(p); + while (offset < end) { u64 len = min(end - offset, read_size);
@@ -5531,7 +5595,6 @@ static int send_hole(struct send_ctx *sctx, u64 end) } sctx->cur_inode_next_write_offset = offset; tlv_put_failure: - fs_path_free(p); return ret; }
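
On the receiving end, the new BTRFS_SEND_C_FALLOCATE command maps onto the ordinary fallocate() hole-punching call. Roughly what a receiver would do for a punched hole (standalone userspace example, file name made up):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("demo.dat", O_RDWR | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ftruncate(fd, 1 << 20) < 0)		/* 1 MiB file */
		perror("ftruncate");
	/* Deallocate 256 KiB at offset 128 KiB without changing i_size. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      128 * 1024, 256 * 1024) < 0)
		perror("fallocate");
	close(fd);
	return 0;
}
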
@@ -5554,9 +5617,9 @@ static int send_encoded_inline_extent(struct send_ctx *sctx, if (IS_ERR(inode)) return PTR_ERR(inode);
- fspath = fs_path_alloc(); - if (!fspath) { - ret = -ENOMEM; + fspath = get_cur_inode_path(sctx); + if (IS_ERR(fspath)) { + ret = PTR_ERR(fspath); goto out; }
@@ -5564,10 +5627,6 @@ static int send_encoded_inline_extent(struct send_ctx *sctx, if (ret < 0) goto out;
- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath); - if (ret < 0) - goto out; - btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); ram_bytes = btrfs_file_extent_ram_bytes(leaf, ei); @@ -5596,7 +5655,6 @@ static int send_encoded_inline_extent(struct send_ctx *sctx,
tlv_put_failure: out: - fs_path_free(fspath); iput(inode); return ret; } @@ -5621,9 +5679,9 @@ static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path, if (IS_ERR(inode)) return PTR_ERR(inode);
- fspath = fs_path_alloc(); - if (!fspath) { - ret = -ENOMEM; + fspath = get_cur_inode_path(sctx); + if (IS_ERR(fspath)) { + ret = PTR_ERR(fspath); goto out; }
@@ -5631,10 +5689,6 @@ static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path, if (ret < 0) goto out;
- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath); - if (ret < 0) - goto out; - btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei); @@ -5701,7 +5755,6 @@ static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path,
tlv_put_failure: out: - fs_path_free(fspath); iput(inode); return ret; } @@ -5831,7 +5884,6 @@ static int send_extent_data(struct send_ctx *sctx, struct btrfs_path *path, */ static int send_capabilities(struct send_ctx *sctx) { - struct fs_path *fspath = NULL; struct btrfs_path *path; struct btrfs_dir_item *di; struct extent_buffer *leaf; @@ -5857,25 +5909,19 @@ static int send_capabilities(struct send_ctx *sctx) leaf = path->nodes[0]; buf_len = btrfs_dir_data_len(leaf, di);
- fspath = fs_path_alloc(); buf = kmalloc(buf_len, GFP_KERNEL); - if (!fspath || !buf) { + if (!buf) { ret = -ENOMEM; goto out; }
- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath); - if (ret < 0) - goto out; - data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di); read_extent_buffer(leaf, buf, data_ptr, buf_len);
- ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS, + ret = send_set_xattr(sctx, XATTR_NAME_CAPS, strlen(XATTR_NAME_CAPS), buf, buf_len); out: kfree(buf); - fs_path_free(fspath); btrfs_free_path(path); return ret; } @@ -6904,6 +6950,7 @@ static int changed_inode(struct send_ctx *sctx, sctx->cur_inode_last_extent = (u64)-1; sctx->cur_inode_next_write_offset = 0; sctx->ignore_cur_inode = false; + fs_path_reset(&sctx->cur_inode_path);
/* * Set send_progress to current inode. This will tell all get_cur_xxx @@ -8194,6 +8241,7 @@ long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg) goto out; }
+ init_path(&sctx->cur_inode_path); INIT_LIST_HEAD(&sctx->new_refs); INIT_LIST_HEAD(&sctx->deleted_refs);
@@ -8479,6 +8527,9 @@ long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg) btrfs_lru_cache_clear(&sctx->dir_created_cache); btrfs_lru_cache_clear(&sctx->dir_utimes_cache);
+ if (sctx->cur_inode_path.buf != sctx->cur_inode_path.inline_buf) + kfree(sctx->cur_inode_path.buf); + kfree(sctx); }
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index 581bdd709ee0..27690c518f6d 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -162,7 +162,7 @@ * thing with or without extra unallocated space. */
-u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info, +u64 __pure btrfs_space_info_used(const struct btrfs_space_info *s_info, bool may_use_included) { ASSERT(s_info); @@ -342,7 +342,7 @@ struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info, }
static u64 calc_available_free_space(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *space_info, + const struct btrfs_space_info *space_info, enum btrfs_reserve_flush_enum flush) { u64 profile; @@ -378,7 +378,7 @@ static u64 calc_available_free_space(struct btrfs_fs_info *fs_info, }
int btrfs_can_overcommit(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *space_info, u64 bytes, + const struct btrfs_space_info *space_info, u64 bytes, enum btrfs_reserve_flush_enum flush) { u64 avail; @@ -483,8 +483,8 @@ static void dump_global_block_rsv(struct btrfs_fs_info *fs_info) DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv); }
-static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *info) +static void __btrfs_dump_space_info(const struct btrfs_fs_info *fs_info, + const struct btrfs_space_info *info) { const char *flag_str = space_info_flag_to_str(info); lockdep_assert_held(&info->lock); @@ -807,9 +807,8 @@ static void flush_space(struct btrfs_fs_info *fs_info, return; }
-static inline u64 -btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *space_info) +static u64 btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info, + const struct btrfs_space_info *space_info) { u64 used; u64 avail; @@ -834,7 +833,7 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info, }
static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *space_info) + const struct btrfs_space_info *space_info) { const u64 global_rsv_size = btrfs_block_rsv_reserved(&fs_info->global_block_rsv); u64 ordered, delalloc; diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h index 08a3bd10addc..b0187f25dbb5 100644 --- a/fs/btrfs/space-info.h +++ b/fs/btrfs/space-info.h @@ -165,7 +165,7 @@ struct reserve_ticket { wait_queue_head_t wait; };
-static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info) +static inline bool btrfs_mixed_space_info(const struct btrfs_space_info *space_info) { return ((space_info->flags & BTRFS_BLOCK_GROUP_METADATA) && (space_info->flags & BTRFS_BLOCK_GROUP_DATA)); @@ -206,7 +206,7 @@ void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info, u64 chunk_size); struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info, u64 flags); -u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info, +u64 __pure btrfs_space_info_used(const struct btrfs_space_info *s_info, bool may_use_included); void btrfs_clear_space_info_full(struct btrfs_fs_info *info); void btrfs_dump_space_info(struct btrfs_fs_info *fs_info, @@ -219,7 +219,7 @@ int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info, void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info); int btrfs_can_overcommit(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *space_info, u64 bytes, + const struct btrfs_space_info *space_info, u64 bytes, enum btrfs_reserve_flush_enum flush);
static inline void btrfs_space_info_free_bytes_may_use( diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 16434106c465..9439abf415ae 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -326,8 +326,7 @@ struct walk_control {
/* * Ignore any items from the inode currently being processed. Needs - * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in - * the LOG_WALK_REPLAY_INODES stage. + * to be set every time we find a BTRFS_INODE_ITEM_KEY. */ bool ignore_cur_inode;
@@ -1423,6 +1422,8 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, btrfs_dir = btrfs_iget_logging(parent_objectid, root); if (IS_ERR(btrfs_dir)) { ret = PTR_ERR(btrfs_dir); + if (ret == -ENOENT) + ret = 0; dir = NULL; goto out; } @@ -1456,6 +1457,15 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, if (IS_ERR(btrfs_dir)) { ret = PTR_ERR(btrfs_dir); dir = NULL; + /* + * A new parent dir may have not been + * logged and not exist in the subvolume + * tree, see the comment above before + * the loop when getting the first + * parent dir. + */ + if (ret == -ENOENT) + ret = 0; goto out; } dir = &btrfs_dir->vfs_inode; @@ -2498,23 +2508,30 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
nritems = btrfs_header_nritems(eb); for (i = 0; i < nritems; i++) { - btrfs_item_key_to_cpu(eb, &key, i); + struct btrfs_inode_item *inode_item;
- /* inode keys are done during the first stage */ - if (key.type == BTRFS_INODE_ITEM_KEY && - wc->stage == LOG_WALK_REPLAY_INODES) { - struct btrfs_inode_item *inode_item; - u32 mode; + btrfs_item_key_to_cpu(eb, &key, i);
- inode_item = btrfs_item_ptr(eb, i, - struct btrfs_inode_item); + if (key.type == BTRFS_INODE_ITEM_KEY) { + inode_item = btrfs_item_ptr(eb, i, struct btrfs_inode_item); /* - * If we have a tmpfile (O_TMPFILE) that got fsync'ed - * and never got linked before the fsync, skip it, as - * replaying it is pointless since it would be deleted - * later. We skip logging tmpfiles, but it's always - * possible we are replaying a log created with a kernel - * that used to log tmpfiles. + * An inode with no links is either: + * + * 1) A tmpfile (O_TMPFILE) that got fsync'ed and never + * got linked before the fsync, skip it, as replaying + * it is pointless since it would be deleted later. + * We skip logging tmpfiles, but it's always possible + * we are replaying a log created with a kernel that + * used to log tmpfiles; + * + * 2) A non-tmpfile which got its last link deleted + * while holding an open fd on it and later got + * fsynced through that fd. We always log the + * parent inodes when inode->last_unlink_trans is + * set to the current transaction, so ignore all the + * inode items for this inode. We will delete the + * inode when processing the parent directory with + * replay_dir_deletes(). */ if (btrfs_inode_nlink(eb, inode_item) == 0) { wc->ignore_cur_inode = true; @@ -2522,8 +2539,14 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, } else { wc->ignore_cur_inode = false; } - ret = replay_xattr_deletes(wc->trans, root, log, - path, key.objectid); + } + + /* Inode keys are done during the first stage. */ + if (key.type == BTRFS_INODE_ITEM_KEY && + wc->stage == LOG_WALK_REPLAY_INODES) { + u32 mode; + + ret = replay_xattr_deletes(wc->trans, root, log, path, key.objectid); if (ret) break; mode = btrfs_inode_mode(eb, inode_item); @@ -2611,9 +2634,8 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, key.type == BTRFS_INODE_EXTREF_KEY) { ret = add_inode_ref(wc->trans, root, log, path, eb, i, &key); - if (ret && ret != -ENOENT) + if (ret) break; - ret = 0; } else if (key.type == BTRFS_EXTENT_DATA_KEY) { ret = replay_one_extent(wc->trans, root, path, eb, i, &key); @@ -4243,6 +4265,9 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, btrfs_set_token_timespec_nsec(&token, &item->ctime, inode_get_ctime(inode).tv_nsec);
+ btrfs_set_timespec_sec(leaf, &item->otime, BTRFS_I(inode)->i_otime_sec); + btrfs_set_timespec_nsec(leaf, &item->otime, BTRFS_I(inode)->i_otime_nsec); + /* * We do not need to set the nbytes field, in fact during a fast fsync * its value may not even be correct, since a fast fsync does not wait @@ -7300,11 +7325,14 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
wc.replay_dest->log_root = log; ret = btrfs_record_root_in_trans(trans, wc.replay_dest); - if (ret) + if (ret) { /* The loop needs to continue due to the root refs */ btrfs_abort_transaction(trans, ret); - else + } else { ret = walk_log_tree(trans, log, &wc); + if (ret) + btrfs_abort_transaction(trans, ret); + }
if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) { ret = fixup_inode_link_counts(trans, wc.replay_dest, diff --git a/fs/btrfs/tree-mod-log.c b/fs/btrfs/tree-mod-log.c index 3df6153d5d5a..febc014a510d 100644 --- a/fs/btrfs/tree-mod-log.c +++ b/fs/btrfs/tree-mod-log.c @@ -171,7 +171,7 @@ static noinline int tree_mod_log_insert(struct btrfs_fs_info *fs_info, * write unlock fs_info::tree_mod_log_lock. */ static inline bool tree_mod_dont_log(struct btrfs_fs_info *fs_info, - struct extent_buffer *eb) + const struct extent_buffer *eb) { if (!test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags)) return true; @@ -189,7 +189,7 @@ static inline bool tree_mod_dont_log(struct btrfs_fs_info *fs_info,
/* Similar to tree_mod_dont_log, but doesn't acquire any locks. */ static inline bool tree_mod_need_log(const struct btrfs_fs_info *fs_info, - struct extent_buffer *eb) + const struct extent_buffer *eb) { if (!test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags)) return false; @@ -199,7 +199,7 @@ static inline bool tree_mod_need_log(const struct btrfs_fs_info *fs_info, return true; }
-static struct tree_mod_elem *alloc_tree_mod_elem(struct extent_buffer *eb, +static struct tree_mod_elem *alloc_tree_mod_elem(const struct extent_buffer *eb, int slot, enum btrfs_mod_log_op op) { @@ -222,7 +222,7 @@ static struct tree_mod_elem *alloc_tree_mod_elem(struct extent_buffer *eb, return tm; }
-int btrfs_tree_mod_log_insert_key(struct extent_buffer *eb, int slot, +int btrfs_tree_mod_log_insert_key(const struct extent_buffer *eb, int slot, enum btrfs_mod_log_op op) { struct tree_mod_elem *tm; @@ -259,7 +259,7 @@ int btrfs_tree_mod_log_insert_key(struct extent_buffer *eb, int slot, return ret; }
-static struct tree_mod_elem *tree_mod_log_alloc_move(struct extent_buffer *eb, +static struct tree_mod_elem *tree_mod_log_alloc_move(const struct extent_buffer *eb, int dst_slot, int src_slot, int nr_items) { @@ -279,7 +279,7 @@ static struct tree_mod_elem *tree_mod_log_alloc_move(struct extent_buffer *eb, return tm; }
-int btrfs_tree_mod_log_insert_move(struct extent_buffer *eb, +int btrfs_tree_mod_log_insert_move(const struct extent_buffer *eb, int dst_slot, int src_slot, int nr_items) { @@ -536,7 +536,7 @@ static struct tree_mod_elem *tree_mod_log_search(struct btrfs_fs_info *fs_info, }
int btrfs_tree_mod_log_eb_copy(struct extent_buffer *dst, - struct extent_buffer *src, + const struct extent_buffer *src, unsigned long dst_offset, unsigned long src_offset, int nr_items) diff --git a/fs/btrfs/tree-mod-log.h b/fs/btrfs/tree-mod-log.h index 94f10afeee97..5f94ab681fa4 100644 --- a/fs/btrfs/tree-mod-log.h +++ b/fs/btrfs/tree-mod-log.h @@ -31,7 +31,7 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info, int btrfs_tree_mod_log_insert_root(struct extent_buffer *old_root, struct extent_buffer *new_root, bool log_removal); -int btrfs_tree_mod_log_insert_key(struct extent_buffer *eb, int slot, +int btrfs_tree_mod_log_insert_key(const struct extent_buffer *eb, int slot, enum btrfs_mod_log_op op); int btrfs_tree_mod_log_free_eb(struct extent_buffer *eb); struct extent_buffer *btrfs_tree_mod_log_rewind(struct btrfs_fs_info *fs_info, @@ -41,11 +41,11 @@ struct extent_buffer *btrfs_tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *btrfs_get_old_root(struct btrfs_root *root, u64 time_seq); int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq); int btrfs_tree_mod_log_eb_copy(struct extent_buffer *dst, - struct extent_buffer *src, + const struct extent_buffer *src, unsigned long dst_offset, unsigned long src_offset, int nr_items); -int btrfs_tree_mod_log_insert_move(struct extent_buffer *eb, +int btrfs_tree_mod_log_insert_move(const struct extent_buffer *eb, int dst_slot, int src_slot, int nr_items); u64 btrfs_tree_mod_log_lowest_seq(struct btrfs_fs_info *fs_info); diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c index 197dfafbf401..3622ba1d8e09 100644 --- a/fs/btrfs/zoned.c +++ b/fs/btrfs/zoned.c @@ -1992,10 +1992,15 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group) goto out_unlock; }
- /* No space left */ - if (btrfs_zoned_bg_is_full(block_group)) { - ret = false; - goto out_unlock; + if (block_group->flags & BTRFS_BLOCK_GROUP_DATA) { + /* The caller should check if the block group is full. */ + if (WARN_ON_ONCE(btrfs_zoned_bg_is_full(block_group))) { + ret = false; + goto out_unlock; + } + } else { + /* Since it is already written, it should have been active. */ + WARN_ON_ONCE(block_group->meta_write_pointer != block_group->start); }
for (i = 0; i < map->num_stripes; i++) { @@ -2346,12 +2351,12 @@ void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info) mutex_unlock(&fs_devices->device_list_mutex); }
-bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info) +bool btrfs_zoned_should_reclaim(const struct btrfs_fs_info *fs_info) { struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; struct btrfs_device *device; + u64 total = btrfs_super_total_bytes(fs_info->super_copy); u64 used = 0; - u64 total = 0; u64 factor;
ASSERT(btrfs_is_zoned(fs_info)); @@ -2364,7 +2369,6 @@ bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info) if (!device->bdev) continue;
- total += device->disk_total_bytes; used += device->bytes_used; } mutex_unlock(&fs_devices->device_list_mutex); @@ -2418,7 +2422,7 @@ int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
spin_lock(&block_group->lock); if (block_group->reserved || block_group->alloc_offset == 0 || - (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM) || + !(block_group->flags & BTRFS_BLOCK_GROUP_DATA) || test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) { spin_unlock(&block_group->lock); continue; diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h index b9cec523b778..448955641d11 100644 --- a/fs/btrfs/zoned.h +++ b/fs/btrfs/zoned.h @@ -77,7 +77,7 @@ void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg, struct extent_buffer *eb); void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg); void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info); -bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info); +bool btrfs_zoned_should_reclaim(const struct btrfs_fs_info *fs_info); void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical, u64 length); int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info); @@ -237,7 +237,7 @@ static inline void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg) { }
static inline void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info) { }
-static inline bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info) +static inline bool btrfs_zoned_should_reclaim(const struct btrfs_fs_info *fs_info) { return false; } diff --git a/fs/buffer.c b/fs/buffer.c index 4b86e971efd8..32df6163ffed 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -157,8 +157,8 @@ static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate) */ void end_buffer_read_sync(struct buffer_head *bh, int uptodate) { - __end_buffer_read_notouch(bh, uptodate); put_bh(bh); + __end_buffer_read_notouch(bh, uptodate); } EXPORT_SYMBOL(end_buffer_read_sync);
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index 2d63da48635a..14b26036055e 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -27,6 +27,22 @@
  */
 #define FSCRYPT_MIN_KEY_SIZE	16
+/*
+ * This mask is passed as the third argument to the crypto_alloc_*() functions
+ * to prevent fscrypt from using the Crypto API drivers for non-inline crypto
+ * engines. Those drivers have been problematic for fscrypt. fscrypt users
+ * have reported hangs and even incorrect en/decryption with these drivers.
+ * Since going to the driver, off CPU, and back again is really slow, such
+ * drivers can be over 50 times slower than the CPU-based code for fscrypt's
+ * workload. Even on platforms that lack AES instructions on the CPU, using the
+ * offloads has been shown to be slower, even staying with AES. (Of course,
+ * Adiantum is faster still, and is the recommended option on such platforms...)
+ *
+ * Note that fscrypt also supports inline crypto engines. Those don't use the
+ * Crypto API and work much better than the old-style (non-inline) engines.
+ */
+#define FSCRYPT_CRYPTOAPI_MASK \
+	(CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY)

 #define FSCRYPT_CONTEXT_V1	1
 #define FSCRYPT_CONTEXT_V2	2
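
For context, this is how a Crypto API user passes such a mask when allocating a transform; the helper below is a hedged kernel-style sketch (invented demo_* names, not fscrypt code) rather than something from this patch:

#include <crypto/hash.h>
#include <linux/err.h>

#define DEMO_CRYPTOAPI_MASK \
	(CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY)

static struct crypto_shash *demo_alloc_hmac(void)
{
	/*
	 * The third argument is a mask: implementations whose algorithm flags
	 * intersect it are skipped, which here leaves the CPU-based code.
	 */
	return crypto_alloc_shash("hmac(sha512)", 0, DEMO_CRYPTOAPI_MASK);
}
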
diff --git a/fs/crypto/hkdf.c b/fs/crypto/hkdf.c index 5a384dad2c72..b7f5e7884e03 100644 --- a/fs/crypto/hkdf.c +++ b/fs/crypto/hkdf.c @@ -72,7 +72,7 @@ int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key, u8 prk[HKDF_HASHLEN]; int err;
-	hmac_tfm = crypto_alloc_shash(HKDF_HMAC_ALG, 0, 0);
+	hmac_tfm = crypto_alloc_shash(HKDF_HMAC_ALG, 0, FSCRYPT_CRYPTOAPI_MASK);
 	if (IS_ERR(hmac_tfm)) {
 		fscrypt_err(NULL, "Error allocating " HKDF_HMAC_ALG ": %ld",
 			    PTR_ERR(hmac_tfm));
diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c
index 361f41ef46c7..2348fc2a47f8 100644
--- a/fs/crypto/keysetup.c
+++ b/fs/crypto/keysetup.c
@@ -103,7 +103,8 @@ fscrypt_allocate_skcipher(struct fscrypt_mode *mode, const u8 *raw_key,
 	struct crypto_skcipher *tfm;
 	int err;
-	tfm = crypto_alloc_skcipher(mode->cipher_str, 0, 0);
+	tfm = crypto_alloc_skcipher(mode->cipher_str, 0,
+				    FSCRYPT_CRYPTOAPI_MASK);
 	if (IS_ERR(tfm)) {
 		if (PTR_ERR(tfm) == -ENOENT) {
 			fscrypt_warn(inode,
diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c
index 75dabd9b27f9..159dd0288349 100644
--- a/fs/crypto/keysetup_v1.c
+++ b/fs/crypto/keysetup_v1.c
@@ -52,7 +52,8 @@ static int derive_key_aes(const u8 *master_key,
 	struct skcipher_request *req = NULL;
 	DECLARE_CRYPTO_WAIT(wait);
 	struct scatterlist src_sg, dst_sg;
-	struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
+	struct crypto_skcipher *tfm =
+		crypto_alloc_skcipher("ecb(aes)", 0, FSCRYPT_CRYPTOAPI_MASK);
if (IS_ERR(tfm)) { res = PTR_ERR(tfm); diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 31b32d9e7bbc..6b2d655c1cef 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -217,6 +217,7 @@ struct eventpoll { /* used to optimize loop detection check */ u64 gen; struct hlist_head refs; + u8 loop_check_depth;
/* * usage count, used together with epitem->dying to @@ -1986,23 +1987,24 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, }
/** - * ep_loop_check_proc - verify that adding an epoll file inside another - * epoll structure does not violate the constraints, in - * terms of closed loops, or too deep chains (which can - * result in excessive stack usage). + * ep_loop_check_proc - verify that adding an epoll file @ep inside another + * epoll file does not create closed loops, and + * determine the depth of the subtree starting at @ep * * @ep: the &struct eventpoll to be currently checked. * @depth: Current depth of the path being checked. * - * Return: %zero if adding the epoll @file inside current epoll - * structure @ep does not violate the constraints, or %-1 otherwise. + * Return: depth of the subtree, or INT_MAX if we found a loop or went too deep. */ static int ep_loop_check_proc(struct eventpoll *ep, int depth) { - int error = 0; + int result = 0; struct rb_node *rbp; struct epitem *epi;
+ if (ep->gen == loop_check_gen) + return ep->loop_check_depth; + mutex_lock_nested(&ep->mtx, depth + 1); ep->gen = loop_check_gen; for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) { @@ -2010,13 +2012,11 @@ static int ep_loop_check_proc(struct eventpoll *ep, int depth) if (unlikely(is_file_epoll(epi->ffd.file))) { struct eventpoll *ep_tovisit; ep_tovisit = epi->ffd.file->private_data; - if (ep_tovisit->gen == loop_check_gen) - continue; if (ep_tovisit == inserting_into || depth > EP_MAX_NESTS) - error = -1; + result = INT_MAX; else - error = ep_loop_check_proc(ep_tovisit, depth + 1); - if (error != 0) + result = max(result, ep_loop_check_proc(ep_tovisit, depth + 1) + 1); + if (result > EP_MAX_NESTS) break; } else { /* @@ -2030,9 +2030,27 @@ static int ep_loop_check_proc(struct eventpoll *ep, int depth) list_file(epi->ffd.file); } } + ep->loop_check_depth = result; mutex_unlock(&ep->mtx);
-	return error;
+	return result;
+}
+
+/**
+ * ep_get_upwards_depth_proc - determine depth of @ep when traversed upwards
+ */
+static int ep_get_upwards_depth_proc(struct eventpoll *ep, int depth)
+{
+	int result = 0;
+	struct epitem *epi;
+
+	if (ep->gen == loop_check_gen)
+		return ep->loop_check_depth;
+	hlist_for_each_entry_rcu(epi, &ep->refs, fllink)
+		result = max(result, ep_get_upwards_depth_proc(epi->ep, depth + 1) + 1);
+	ep->gen = loop_check_gen;
+	ep->loop_check_depth = result;
+	return result;
 }
/** @@ -2048,8 +2066,22 @@ static int ep_loop_check_proc(struct eventpoll *ep, int depth) */ static int ep_loop_check(struct eventpoll *ep, struct eventpoll *to) { + int depth, upwards_depth; + inserting_into = ep; - return ep_loop_check_proc(to, 0); + /* + * Check how deep down we can get from @to, and whether it is possible + * to loop up to @ep. + */ + depth = ep_loop_check_proc(to, 0); + if (depth > EP_MAX_NESTS) + return -1; + /* Check how far up we can go from @ep. */ + rcu_read_lock(); + upwards_depth = ep_get_upwards_depth_proc(ep, 0); + rcu_read_unlock(); + + return (depth+1+upwards_depth > EP_MAX_NESTS) ? -1 : 0; }
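
The reworked check computes the maximum nesting depth in both directions and memoizes each eventpoll's result with a generation counter so shared subtrees are only walked once; the kernel code additionally caps the depth at EP_MAX_NESTS to catch loops. A toy userspace sketch of the memoized depth walk (invented node type, no loop handling):

#include <stdio.h>

#define MAX_CHILDREN 4

struct node {
	struct node *child[MAX_CHILDREN];
	int nchildren;
	unsigned int gen;	/* last generation this node was visited in */
	int depth;		/* memoized subtree depth */
};

static unsigned int check_gen;

static int subtree_depth(struct node *n)
{
	int result = 0;

	if (n->gen == check_gen)
		return n->depth;	/* already computed in this pass */
	n->gen = check_gen;
	for (int i = 0; i < n->nchildren; i++) {
		int d = subtree_depth(n->child[i]) + 1;

		if (d > result)
			result = d;
	}
	n->depth = result;
	return result;
}

int main(void)
{
	struct node leaf = { .nchildren = 0 };
	struct node mid = { .child = { &leaf }, .nchildren = 1 };
	struct node root = { .child = { &mid, &leaf }, .nchildren = 2 };

	check_gen++;
	printf("depth = %d\n", subtree_depth(&root));	/* prints 2 */
	return 0;
}
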
static void clear_tfile_check_list(void) diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c index f4f81e349cef..6139a57fde70 100644 --- a/fs/exfat/dir.c +++ b/fs/exfat/dir.c @@ -994,6 +994,7 @@ int exfat_find_dir_entry(struct super_block *sb, struct exfat_inode_info *ei, struct exfat_hint_femp candi_empty; struct exfat_sb_info *sbi = EXFAT_SB(sb); int num_entries = exfat_calc_num_entries(p_uniname); + unsigned int clu_count = 0;
if (num_entries < 0) return num_entries; @@ -1131,6 +1132,10 @@ int exfat_find_dir_entry(struct super_block *sb, struct exfat_inode_info *ei, } else { if (exfat_get_next_cluster(sb, &clu.dir)) return -EIO; + + /* break if the cluster chain includes a loop */ + if (unlikely(++clu_count > EXFAT_DATA_CLUSTER_COUNT(sbi))) + goto not_found; } }
@@ -1214,6 +1219,7 @@ int exfat_count_dir_entries(struct super_block *sb, struct exfat_chain *p_dir) int i, count = 0; int dentries_per_clu; unsigned int entry_type; + unsigned int clu_count = 0; struct exfat_chain clu; struct exfat_dentry *ep; struct exfat_sb_info *sbi = EXFAT_SB(sb); @@ -1246,6 +1252,12 @@ int exfat_count_dir_entries(struct super_block *sb, struct exfat_chain *p_dir) } else { if (exfat_get_next_cluster(sb, &(clu.dir))) return -EIO; + + if (unlikely(++clu_count > sbi->used_clusters)) { + exfat_fs_error(sb, "FAT or bitmap is corrupted"); + return -EIO; + } + } }
diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c index 24e1e05f9f34..407880901ee3 100644 --- a/fs/exfat/fatent.c +++ b/fs/exfat/fatent.c @@ -461,5 +461,15 @@ int exfat_count_num_clusters(struct super_block *sb, }
*ret_count = count; + + /* + * since exfat_count_used_clusters() is not called, sbi->used_clusters + * cannot be used here. + */ + if (unlikely(i == sbi->num_clusters && clu != EXFAT_EOF_CLUSTER)) { + exfat_fs_error(sb, "The cluster chain has a loop"); + return -EIO; + } + return 0; } diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c index f340e96b499f..4657f893dea7 100644 --- a/fs/exfat/namei.c +++ b/fs/exfat/namei.c @@ -893,6 +893,7 @@ static int exfat_check_dir_empty(struct super_block *sb, { int i, dentries_per_clu; unsigned int type; + unsigned int clu_count = 0; struct exfat_chain clu; struct exfat_dentry *ep; struct exfat_sb_info *sbi = EXFAT_SB(sb); @@ -929,6 +930,10 @@ static int exfat_check_dir_empty(struct super_block *sb, } else { if (exfat_get_next_cluster(sb, &(clu.dir))) return -EIO; + + /* break if the cluster chain includes a loop */ + if (unlikely(++clu_count > EXFAT_DATA_CLUSTER_COUNT(sbi))) + break; } }
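
All three exFAT hunks use the same guard: bound the walk of a cluster chain by the number of clusters that can possibly exist, and treat anything longer as a loop. A self-contained sketch of that guard with a toy next-cluster table (invented names):

#include <stdio.h>

#define NCLUSTERS	8
#define EOF_CLUSTER	0xFFFFFFFFu

static unsigned int next_clu[NCLUSTERS];

static int walk_chain(unsigned int clu)
{
	unsigned int count = 0;

	while (clu != EOF_CLUSTER) {
		/* more links than clusters exist: the chain must loop */
		if (++count > NCLUSTERS) {
			fprintf(stderr, "cluster chain has a loop\n");
			return -1;
		}
		clu = next_clu[clu];
	}
	return 0;
}

int main(void)
{
	next_clu[2] = 3;
	next_clu[3] = 2;	/* deliberately corrupt: 2 -> 3 -> 2 -> ... */
	return walk_chain(2) ? 1 : 0;
}
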
diff --git a/fs/exfat/super.c b/fs/exfat/super.c index 2778bd9b631e..5affc11d1461 100644 --- a/fs/exfat/super.c +++ b/fs/exfat/super.c @@ -327,13 +327,12 @@ static void exfat_hash_init(struct super_block *sb) INIT_HLIST_HEAD(&sbi->inode_hashtable[i]); }
-static int exfat_read_root(struct inode *inode) +static int exfat_read_root(struct inode *inode, struct exfat_chain *root_clu) { struct super_block *sb = inode->i_sb; struct exfat_sb_info *sbi = EXFAT_SB(sb); struct exfat_inode_info *ei = EXFAT_I(inode); - struct exfat_chain cdir; - int num_subdirs, num_clu = 0; + int num_subdirs;
exfat_chain_set(&ei->dir, sbi->root_dir, 0, ALLOC_FAT_CHAIN); ei->entry = -1; @@ -346,12 +345,9 @@ static int exfat_read_root(struct inode *inode) ei->hint_stat.clu = sbi->root_dir; ei->hint_femp.eidx = EXFAT_HINT_NONE;
- exfat_chain_set(&cdir, sbi->root_dir, 0, ALLOC_FAT_CHAIN); - if (exfat_count_num_clusters(sb, &cdir, &num_clu)) - return -EIO; - i_size_write(inode, num_clu << sbi->cluster_size_bits); + i_size_write(inode, EXFAT_CLU_TO_B(root_clu->size, sbi));
- num_subdirs = exfat_count_dir_entries(sb, &cdir); + num_subdirs = exfat_count_dir_entries(sb, root_clu); if (num_subdirs < 0) return -EIO; set_nlink(inode, num_subdirs + EXFAT_MIN_SUBDIR); @@ -567,7 +563,8 @@ static int exfat_verify_boot_region(struct super_block *sb) }
/* mount the file system volume */ -static int __exfat_fill_super(struct super_block *sb) +static int __exfat_fill_super(struct super_block *sb, + struct exfat_chain *root_clu) { int ret; struct exfat_sb_info *sbi = EXFAT_SB(sb); @@ -584,6 +581,18 @@ static int __exfat_fill_super(struct super_block *sb) goto free_bh; }
+ /* + * Call exfat_count_num_cluster() before searching for up-case and + * bitmap directory entries to avoid infinite loop if they are missing + * and the cluster chain includes a loop. + */ + exfat_chain_set(root_clu, sbi->root_dir, 0, ALLOC_FAT_CHAIN); + ret = exfat_count_num_clusters(sb, root_clu, &root_clu->size); + if (ret) { + exfat_err(sb, "failed to count the number of clusters in root"); + goto free_bh; + } + ret = exfat_create_upcase_table(sb); if (ret) { exfat_err(sb, "failed to load upcase table"); @@ -618,6 +627,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc) struct exfat_sb_info *sbi = sb->s_fs_info; struct exfat_mount_options *opts = &sbi->options; struct inode *root_inode; + struct exfat_chain root_clu; int err;
if (opts->allow_utime == (unsigned short)-1) @@ -636,7 +646,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc) sb->s_time_min = EXFAT_MIN_TIMESTAMP_SECS; sb->s_time_max = EXFAT_MAX_TIMESTAMP_SECS;
- err = __exfat_fill_super(sb); + err = __exfat_fill_super(sb, &root_clu); if (err) { exfat_err(sb, "failed to recognize exfat type"); goto check_nls_io; @@ -671,7 +681,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
root_inode->i_ino = EXFAT_ROOT_INO; inode_set_iversion(root_inode, 1); - err = exfat_read_root(root_inode); + err = exfat_read_root(root_inode, &root_clu); if (err) { exfat_err(sb, "failed to initialize root inode"); goto put_inode; diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 314b415ee518..6ff1f8f29a3c 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -895,9 +895,19 @@ int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len) { int ret; + loff_t i_size;
inode_lock(inode); - len = min_t(u64, len, i_size_read(inode)); + i_size = i_size_read(inode); + /* + * iomap_fiemap() returns EINVAL for 0 length. Make sure we don't trim + * length to 0 but still trim the range as much as possible since + * ext2_get_blocks() iterates unmapped space block by block which is + * slow. + */ + if (i_size == 0) + i_size = 1; + len = min_t(u64, len, i_size); ret = iomap_fiemap(inode, fieinfo, start, len, &ext2_iomap_ops); inode_unlock(inode);
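
The ext2 change is just a clamp: trim the requested length to i_size, but never below one byte so iomap_fiemap() is not handed a zero-length range. The same logic in isolation (plain C, invented helper):

#include <stdint.h>
#include <stdio.h>

static uint64_t clamp_fiemap_len(uint64_t requested, uint64_t i_size)
{
	if (i_size == 0)
		i_size = 1;	/* keep the range non-empty */
	return requested < i_size ? requested : i_size;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)clamp_fiemap_len(4096, 0));	/* 1 */
	printf("%llu\n", (unsigned long long)clamp_fiemap_len(4096, 100));	/* 100 */
	return 0;
}
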
diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c index 53a05b8292f0..1b68586f73f3 100644 --- a/fs/ext4/fsmap.c +++ b/fs/ext4/fsmap.c @@ -393,6 +393,14 @@ static unsigned int ext4_getfsmap_find_sb(struct super_block *sb, /* Reserved GDT blocks */ if (!ext4_has_feature_meta_bg(sb) || metagroup < first_meta_bg) { len = le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks); + + /* + * mkfs.ext4 can set s_reserved_gdt_blocks as 0 in some cases, + * check for that. + */ + if (!len) + return 0; + error = ext4_getfsmap_fill(meta_list, fsb, len, EXT4_FMR_OWN_RESV_GDT); if (error) @@ -526,6 +534,7 @@ static int ext4_getfsmap_datadev(struct super_block *sb, ext4_group_t end_ag; ext4_grpblk_t first_cluster; ext4_grpblk_t last_cluster; + struct ext4_fsmap irec; int error = 0;
bofs = le32_to_cpu(sbi->s_es->s_first_data_block); @@ -609,10 +618,18 @@ static int ext4_getfsmap_datadev(struct super_block *sb, goto err; }
- /* Report any gaps at the end of the bg */ + /* + * The dummy record below will cause ext4_getfsmap_helper() to report + * any allocated blocks at the end of the range. + */ + irec.fmr_device = 0; + irec.fmr_physical = end_fsb + 1; + irec.fmr_length = 0; + irec.fmr_owner = EXT4_FMR_OWN_FREE; + irec.fmr_flags = 0; + info->gfi_last = true; - error = ext4_getfsmap_datadev_helper(sb, end_ag, last_cluster + 1, - 0, info); + error = ext4_getfsmap_helper(sb, info, &irec); if (error) goto err;
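
The dummy record works because the fsmap helper reports the previously seen extent (and any gap before the new one) only when the next record arrives; a zero-length sentinel just past the range flushes whatever is still pending. A loose userspace sketch of that idea (invented types, not the ext4 helper):

#include <stdint.h>
#include <stdio.h>

struct rec {
	uint64_t physical;
	uint64_t length;
};

static struct rec pending;
static int have_pending;

static void helper(const struct rec *r)
{
	if (have_pending) {
		uint64_t gap_start = pending.physical + pending.length;

		printf("used: %llu +%llu\n",
		       (unsigned long long)pending.physical,
		       (unsigned long long)pending.length);
		if (r->physical > gap_start)
			printf("free: %llu +%llu\n",
			       (unsigned long long)gap_start,
			       (unsigned long long)(r->physical - gap_start));
	}
	pending = *r;
	have_pending = 1;
}

int main(void)
{
	struct rec a = { .physical = 0, .length = 16 };
	struct rec b = { .physical = 32, .length = 8 };
	struct rec end = { .physical = 64 + 1, .length = 0 };	/* sentinel */

	helper(&a);
	helper(&b);
	helper(&end);	/* flushes the last real record and the trailing gap */
	return 0;
}
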
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index f2c495b745f1..d18a5bee1021 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c @@ -539,7 +539,7 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, int indirect_blks; int blocks_to_boundary = 0; int depth; - int count = 0; + u64 count = 0; ext4_fsblk_t first_block = 0;
trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); @@ -588,7 +588,7 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, count++; /* Fill in size of a hole we found */ map->m_pblk = 0; - map->m_len = min_t(unsigned int, map->m_len, count); + map->m_len = umin(map->m_len, count); goto cleanup; }
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index c85647a0ba09..5fa1dd58ac42 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -298,7 +298,11 @@ static int ext4_create_inline_data(handle_t *handle, if (error) goto out;
- BUG_ON(!is.s.not_found); + if (!is.s.not_found) { + EXT4_ERROR_INODE(inode, "unexpected inline data xattr"); + error = -EFSCORRUPTED; + goto out; + }
error = ext4_xattr_ibody_set(handle, inode, &i, &is); if (error) { @@ -349,7 +353,11 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode, if (error) goto out;
- BUG_ON(is.s.not_found); + if (is.s.not_found) { + EXT4_ERROR_INODE(inode, "missing inline data xattr"); + error = -EFSCORRUPTED; + goto out; + }
len -= EXT4_MIN_INLINE_DATA_SIZE; value = kzalloc(len, GFP_NOFS); @@ -1966,7 +1974,12 @@ int ext4_inline_data_truncate(struct inode *inode, int *has_inline) if ((err = ext4_xattr_ibody_find(inode, &i, &is)) != 0) goto out_error;
- BUG_ON(is.s.not_found); + if (is.s.not_found) { + EXT4_ERROR_INODE(inode, + "missing inline data xattr"); + err = -EFSCORRUPTED; + goto out_error; + }
value_len = le32_to_cpu(is.s.here->e_value_size); value = kmalloc(value_len, GFP_NOFS); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 9694ef6b996e..886d4dfa737a 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -146,7 +146,7 @@ static int ext4_meta_trans_blocks(struct inode *inode, int lblocks, */ int ext4_inode_is_fast_symlink(struct inode *inode) { - if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) { + if (!ext4_has_feature_ea_inode(inode->i_sb)) { int ea_blocks = EXT4_I(inode)->i_file_acl ? EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 8a9f8c95c6f1..c5f642096ab4 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -841,30 +841,30 @@ static void mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp) { struct ext4_sb_info *sbi = EXT4_SB(sb); - int new_order; + int new, old;
- if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_fragments == 0) + if (!test_opt2(sb, MB_OPTIMIZE_SCAN)) return;
- new_order = mb_avg_fragment_size_order(sb, - grp->bb_free / grp->bb_fragments); - if (new_order == grp->bb_avg_fragment_size_order) + old = grp->bb_avg_fragment_size_order; + new = grp->bb_fragments == 0 ? -1 : + mb_avg_fragment_size_order(sb, grp->bb_free / grp->bb_fragments); + if (new == old) return;
- if (grp->bb_avg_fragment_size_order != -1) { - write_lock(&sbi->s_mb_avg_fragment_size_locks[ - grp->bb_avg_fragment_size_order]); + if (old >= 0) { + write_lock(&sbi->s_mb_avg_fragment_size_locks[old]); list_del(&grp->bb_avg_fragment_size_node); - write_unlock(&sbi->s_mb_avg_fragment_size_locks[ - grp->bb_avg_fragment_size_order]); + write_unlock(&sbi->s_mb_avg_fragment_size_locks[old]); + } + + grp->bb_avg_fragment_size_order = new; + if (new >= 0) { + write_lock(&sbi->s_mb_avg_fragment_size_locks[new]); + list_add_tail(&grp->bb_avg_fragment_size_node, + &sbi->s_mb_avg_fragment_size[new]); + write_unlock(&sbi->s_mb_avg_fragment_size_locks[new]); } - grp->bb_avg_fragment_size_order = new_order; - write_lock(&sbi->s_mb_avg_fragment_size_locks[ - grp->bb_avg_fragment_size_order]); - list_add_tail(&grp->bb_avg_fragment_size_node, - &sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]); - write_unlock(&sbi->s_mb_avg_fragment_size_locks[ - grp->bb_avg_fragment_size_order]); }
/* @@ -1150,33 +1150,28 @@ static void mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp) { struct ext4_sb_info *sbi = EXT4_SB(sb); - int i; + int new, old = grp->bb_largest_free_order;
- for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--) - if (grp->bb_counters[i] > 0) + for (new = MB_NUM_ORDERS(sb) - 1; new >= 0; new--) + if (grp->bb_counters[new] > 0) break; + /* No need to move between order lists? */ - if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || - i == grp->bb_largest_free_order) { - grp->bb_largest_free_order = i; + if (new == old) return; - }
- if (grp->bb_largest_free_order >= 0) { - write_lock(&sbi->s_mb_largest_free_orders_locks[ - grp->bb_largest_free_order]); + if (old >= 0 && !list_empty(&grp->bb_largest_free_order_node)) { + write_lock(&sbi->s_mb_largest_free_orders_locks[old]); list_del_init(&grp->bb_largest_free_order_node); - write_unlock(&sbi->s_mb_largest_free_orders_locks[ - grp->bb_largest_free_order]); + write_unlock(&sbi->s_mb_largest_free_orders_locks[old]); } - grp->bb_largest_free_order = i; - if (grp->bb_largest_free_order >= 0 && grp->bb_free) { - write_lock(&sbi->s_mb_largest_free_orders_locks[ - grp->bb_largest_free_order]); + + grp->bb_largest_free_order = new; + if (test_opt2(sb, MB_OPTIMIZE_SCAN) && new >= 0 && grp->bb_free) { + write_lock(&sbi->s_mb_largest_free_orders_locks[new]); list_add_tail(&grp->bb_largest_free_order_node, - &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]); - write_unlock(&sbi->s_mb_largest_free_orders_locks[ - grp->bb_largest_free_order]); + &sbi->s_mb_largest_free_orders[new]); + write_unlock(&sbi->s_mb_largest_free_orders_locks[new]); } }
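
Both mballoc hunks boil down to moving a group between per-order lists when its computed order changes, taking only the per-order lock each step needs and skipping the lists entirely for a negative order. A simplified userspace sketch of that move (invented structures, pthread rwlocks standing in for the kernel locks):

#include <pthread.h>
#include <stdio.h>

#define NORDERS 4

struct grp {
	struct grp *prev, *next;	/* membership in one per-order list */
	int order;			/* -1: currently on no list */
};

static struct grp *heads[NORDERS];
static pthread_rwlock_t locks[NORDERS] = {
	PTHREAD_RWLOCK_INITIALIZER, PTHREAD_RWLOCK_INITIALIZER,
	PTHREAD_RWLOCK_INITIALIZER, PTHREAD_RWLOCK_INITIALIZER,
};

static void move_to_order(struct grp *g, int new_order)
{
	int old_order = g->order;

	if (new_order == old_order)
		return;

	if (old_order >= 0) {
		pthread_rwlock_wrlock(&locks[old_order]);
		if (g->prev)
			g->prev->next = g->next;
		else
			heads[old_order] = g->next;
		if (g->next)
			g->next->prev = g->prev;
		pthread_rwlock_unlock(&locks[old_order]);
	}

	g->order = new_order;
	if (new_order >= 0) {
		pthread_rwlock_wrlock(&locks[new_order]);
		g->prev = NULL;
		g->next = heads[new_order];
		if (heads[new_order])
			heads[new_order]->prev = g;
		heads[new_order] = g;
		pthread_rwlock_unlock(&locks[new_order]);
	}
}

int main(void)
{
	struct grp g = { .order = -1 };

	move_to_order(&g, 2);
	move_to_order(&g, 3);
	printf("final order list: %d\n", g.order);
	return 0;
}
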
diff --git a/fs/ext4/orphan.c b/fs/ext4/orphan.c index e5b47dda3317..a23b0c01f809 100644 --- a/fs/ext4/orphan.c +++ b/fs/ext4/orphan.c @@ -590,8 +590,9 @@ int ext4_init_orphan_info(struct super_block *sb) } oi->of_blocks = inode->i_size >> sb->s_blocksize_bits; oi->of_csum_seed = EXT4_I(inode)->i_csum_seed; - oi->of_binfo = kmalloc(oi->of_blocks*sizeof(struct ext4_orphan_block), - GFP_KERNEL); + oi->of_binfo = kmalloc_array(oi->of_blocks, + sizeof(struct ext4_orphan_block), + GFP_KERNEL); if (!oi->of_binfo) { ret = -ENOMEM; goto out_put; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index d2b58f940aab..7e3906f13909 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -2028,6 +2028,9 @@ int ext4_init_fs_context(struct fs_context *fc) fc->fs_private = ctx; fc->ops = &ext4_context_ops;
+ /* i_version is always enabled now */ + fc->sb_flags |= SB_I_VERSION; + return 0; }
@@ -5305,9 +5308,6 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb) sb->s_flags = (sb->s_flags & ~SB_POSIXACL) | (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
- /* i_version is always enabled now */ - sb->s_flags |= SB_I_VERSION; - err = ext4_check_feature_compatibility(sb, es, silent); if (err) goto failed_mount; @@ -5398,6 +5398,8 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb) err = ext4_load_and_init_journal(sb, es, ctx); if (err) goto failed_mount3a; + if (bdev_read_only(sb->s_bdev)) + needs_recovery = 0; } else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) && ext4_has_feature_journal_needs_recovery(sb)) { ext4_msg(sb, KERN_ERR, "required journal recovery " diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 8f0cb7c7eede..031015823acb 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -991,6 +991,18 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) return -EIO;
+ err = setattr_prepare(idmap, dentry, attr); + if (err) + return err; + + err = fscrypt_prepare_setattr(dentry, attr); + if (err) + return err; + + err = fsverity_prepare_setattr(dentry, attr); + if (err) + return err; + if (unlikely(IS_IMMUTABLE(inode))) return -EPERM;
@@ -1008,18 +1020,6 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, return -EINVAL; }
- err = setattr_prepare(idmap, dentry, attr); - if (err) - return err; - - err = fscrypt_prepare_setattr(dentry, attr); - if (err) - return err; - - err = fsverity_prepare_setattr(dentry, attr); - if (err) - return err; - if (is_quota_modification(idmap, inode, attr)) { err = f2fs_dquot_initialize(inode); if (err) diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index b00d66b95321..1b404937743c 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -799,6 +799,16 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode) for (i = 1; i <= level; i++) { bool done = false;
+ if (nids[i] && nids[i] == dn->inode->i_ino) { + err = -EFSCORRUPTED; + f2fs_err_ratelimited(sbi, + "inode mapping table is corrupted, run fsck to fix it, " + "ino:%lu, nid:%u, level:%d, offset:%d", + dn->inode->i_ino, nids[i], level, offset[level]); + set_sbi_flag(sbi, SBI_NEED_FSCK); + goto release_pages; + } + if (!nids[i] && mode == ALLOC_NODE) { /* alloc new node */ if (!f2fs_alloc_nid(sbi, &(nids[i]))) { diff --git a/fs/file.c b/fs/file.c index f8cf6728c6a0..0ce6a6930276 100644 --- a/fs/file.c +++ b/fs/file.c @@ -90,18 +90,11 @@ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt) * 'unsigned long' in some places, but simply because that is how the Linux * kernel bitmaps are defined to work: they are not "bits in an array of bytes", * they are very much "bits in an array of unsigned long". - * - * The ALIGN(nr, BITS_PER_LONG) here is for clarity: since we just multiplied - * by that "1024/sizeof(ptr)" before, we already know there are sufficient - * clear low bits. Clang seems to realize that, gcc ends up being confused. - * - * On a 128-bit machine, the ALIGN() would actually matter. In the meantime, - * let's consider it documentation (and maybe a test-case for gcc to improve - * its code generation ;) */ -static struct fdtable * alloc_fdtable(unsigned int nr) +static struct fdtable *alloc_fdtable(unsigned int slots_wanted) { struct fdtable *fdt; + unsigned int nr; void *data;
/* @@ -109,22 +102,47 @@ static struct fdtable * alloc_fdtable(unsigned int nr) * Allocation steps are keyed to the size of the fdarray, since it * grows far faster than any of the other dynamic data. We try to fit * the fdarray into comfortable page-tuned chunks: starting at 1024B - * and growing in powers of two from there on. + * and growing in powers of two from there on. Since we called only + * with slots_wanted > BITS_PER_LONG (embedded instance in files->fdtab + * already gives BITS_PER_LONG slots), the above boils down to + * 1. use the smallest power of two large enough to give us that many + * slots. + * 2. on 32bit skip 64 and 128 - the minimal capacity we want there is + * 256 slots (i.e. 1Kb fd array). + * 3. on 64bit don't skip anything, 1Kb fd array means 128 slots there + * and we are never going to be asked for 64 or less. */ - nr /= (1024 / sizeof(struct file *)); - nr = roundup_pow_of_two(nr + 1); - nr *= (1024 / sizeof(struct file *)); - nr = ALIGN(nr, BITS_PER_LONG); + if (IS_ENABLED(CONFIG_32BIT) && slots_wanted < 256) + nr = 256; + else + nr = roundup_pow_of_two(slots_wanted); /* * Note that this can drive nr *below* what we had passed if sysctl_nr_open - * had been set lower between the check in expand_files() and here. Deal - * with that in caller, it's cheaper that way. + * had been set lower between the check in expand_files() and here. * * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise * bitmaps handling below becomes unpleasant, to put it mildly... */ - if (unlikely(nr > sysctl_nr_open)) - nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1; + if (unlikely(nr > sysctl_nr_open)) { + nr = round_down(sysctl_nr_open, BITS_PER_LONG); + if (nr < slots_wanted) + return ERR_PTR(-EMFILE); + } + + /* + * Check if the allocation size would exceed INT_MAX. kvmalloc_array() + * and kvmalloc() will warn if the allocation size is greater than + * INT_MAX, as filp_cachep objects are not __GFP_NOWARN. + * + * This can happen when sysctl_nr_open is set to a very high value and + * a process tries to use a file descriptor near that limit. For example, + * if sysctl_nr_open is set to 1073741816 (0x3ffffff8) - which is what + * systemd typically sets it to - then trying to use a file descriptor + * close to that value will require allocating a file descriptor table + * that exceeds 8GB in size. + */ + if (unlikely(nr > INT_MAX / sizeof(struct file *))) + return ERR_PTR(-EMFILE);
fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT); if (!fdt) @@ -153,7 +171,7 @@ static struct fdtable * alloc_fdtable(unsigned int nr) out_fdt: kfree(fdt); out: - return NULL; + return ERR_PTR(-ENOMEM); }
/* @@ -170,7 +188,7 @@ static int expand_fdtable(struct files_struct *files, unsigned int nr) struct fdtable *new_fdt, *cur_fdt;
spin_unlock(&files->file_lock); - new_fdt = alloc_fdtable(nr); + new_fdt = alloc_fdtable(nr + 1);
/* make sure all fd_install() have seen resize_in_progress * or have finished their rcu_read_lock_sched() section. @@ -179,16 +197,8 @@ static int expand_fdtable(struct files_struct *files, unsigned int nr) synchronize_rcu();
spin_lock(&files->file_lock); - if (!new_fdt) - return -ENOMEM; - /* - * extremely unlikely race - sysctl_nr_open decreased between the check in - * caller and alloc_fdtable(). Cheaper to catch it here... - */ - if (unlikely(new_fdt->max_fds <= nr)) { - __free_fdtable(new_fdt); - return -EMFILE; - } + if (IS_ERR(new_fdt)) + return PTR_ERR(new_fdt); cur_fdt = files_fdtable(files); BUG_ON(nr < cur_fdt->max_fds); copy_fdtable(new_fdt, cur_fdt); @@ -302,7 +312,6 @@ struct files_struct *dup_fd(struct files_struct *oldf, struct fd_range *punch_ho struct file **old_fds, **new_fds; unsigned int open_files, i; struct fdtable *old_fdt, *new_fdt; - int error;
newf = kmem_cache_alloc(files_cachep, GFP_KERNEL); if (!newf) @@ -334,17 +343,10 @@ struct files_struct *dup_fd(struct files_struct *oldf, struct fd_range *punch_ho if (new_fdt != &newf->fdtab) __free_fdtable(new_fdt);
- new_fdt = alloc_fdtable(open_files - 1); - if (!new_fdt) { - error = -ENOMEM; - goto out_release; - } - - /* beyond sysctl_nr_open; nothing to do */ - if (unlikely(new_fdt->max_fds < open_files)) { - __free_fdtable(new_fdt); - error = -EMFILE; - goto out_release; + new_fdt = alloc_fdtable(open_files); + if (IS_ERR(new_fdt)) { + kmem_cache_free(files_cachep, newf); + return ERR_CAST(new_fdt); }
/* @@ -393,10 +395,6 @@ struct files_struct *dup_fd(struct files_struct *oldf, struct fd_range *punch_ho rcu_assign_pointer(newf->fdt, new_fdt);
return newf; - -out_release: - kmem_cache_free(files_cachep, newf); - return ERR_PTR(error); }
static struct fdtable *close_files(struct files_struct * files) diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c index 1f42eae112fb..b1a368fc089f 100644 --- a/fs/gfs2/meta_io.c +++ b/fs/gfs2/meta_io.c @@ -93,6 +93,7 @@ const struct address_space_operations gfs2_meta_aops = { .invalidate_folio = block_invalidate_folio, .writepage = gfs2_aspace_writepage, .release_folio = gfs2_release_folio, + .migrate_folio = buffer_migrate_folio_norefs, };
const struct address_space_operations gfs2_rgrp_aops = { @@ -100,6 +101,7 @@ const struct address_space_operations gfs2_rgrp_aops = { .invalidate_folio = block_invalidate_folio, .writepage = gfs2_aspace_writepage, .release_folio = gfs2_release_folio, + .migrate_folio = buffer_migrate_folio_norefs, };
/** diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c index ef9498a6e88a..34e9804e0f36 100644 --- a/fs/hfs/bfind.c +++ b/fs/hfs/bfind.c @@ -16,6 +16,9 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd) { void *ptr;
+ if (!tree || !fd) + return -EINVAL; + fd->tree = tree; fd->bnode = NULL; ptr = kmalloc(tree->max_key_len * 2 + 4, GFP_KERNEL); diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c index cb823a8a6ba9..e8cd1a31f247 100644 --- a/fs/hfs/bnode.c +++ b/fs/hfs/bnode.c @@ -15,6 +15,48 @@
#include "btree.h"
+static inline +bool is_bnode_offset_valid(struct hfs_bnode *node, int off) +{ + bool is_valid = off < node->tree->node_size; + + if (!is_valid) { + pr_err("requested invalid offset: " + "NODE: id %u, type %#x, height %u, " + "node_size %u, offset %d\n", + node->this, node->type, node->height, + node->tree->node_size, off); + } + + return is_valid; +} + +static inline +int check_and_correct_requested_length(struct hfs_bnode *node, int off, int len) +{ + unsigned int node_size; + + if (!is_bnode_offset_valid(node, off)) + return 0; + + node_size = node->tree->node_size; + + if ((off + len) > node_size) { + int new_len = (int)node_size - off; + + pr_err("requested length has been corrected: " + "NODE: id %u, type %#x, height %u, " + "node_size %u, offset %d, " + "requested_len %d, corrected_len %d\n", + node->this, node->type, node->height, + node->tree->node_size, off, len, new_len); + + return new_len; + } + + return len; +} + void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len) { struct page *page; @@ -22,6 +64,20 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len) int bytes_read; int bytes_to_read;
+ if (!is_bnode_offset_valid(node, off)) + return; + + if (len == 0) { + pr_err("requested zero length: " + "NODE: id %u, type %#x, height %u, " + "node_size %u, offset %d, len %d\n", + node->this, node->type, node->height, + node->tree->node_size, off, len); + return; + } + + len = check_and_correct_requested_length(node, off, len); + off += node->page_offset; pagenum = off >> PAGE_SHIFT; off &= ~PAGE_MASK; /* compute page offset for the first page */ @@ -80,6 +136,20 @@ void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len) { struct page *page;
+ if (!is_bnode_offset_valid(node, off)) + return; + + if (len == 0) { + pr_err("requested zero length: " + "NODE: id %u, type %#x, height %u, " + "node_size %u, offset %d, len %d\n", + node->this, node->type, node->height, + node->tree->node_size, off, len); + return; + } + + len = check_and_correct_requested_length(node, off, len); + off += node->page_offset; page = node->page[0];
@@ -104,6 +174,20 @@ void hfs_bnode_clear(struct hfs_bnode *node, int off, int len) { struct page *page;
+ if (!is_bnode_offset_valid(node, off)) + return; + + if (len == 0) { + pr_err("requested zero length: " + "NODE: id %u, type %#x, height %u, " + "node_size %u, offset %d, len %d\n", + node->this, node->type, node->height, + node->tree->node_size, off, len); + return; + } + + len = check_and_correct_requested_length(node, off, len); + off += node->page_offset; page = node->page[0];
@@ -119,6 +203,10 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst, hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len); if (!len) return; + + len = check_and_correct_requested_length(src_node, src, len); + len = check_and_correct_requested_length(dst_node, dst, len); + src += src_node->page_offset; dst += dst_node->page_offset; src_page = src_node->page[0]; @@ -136,6 +224,10 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len) hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len); if (!len) return; + + len = check_and_correct_requested_length(node, src, len); + len = check_and_correct_requested_length(node, dst, len); + src += node->page_offset; dst += node->page_offset; page = node->page[0]; @@ -482,6 +574,7 @@ void hfs_bnode_put(struct hfs_bnode *node) if (test_bit(HFS_BNODE_DELETED, &node->flags)) { hfs_bnode_unhash(node); spin_unlock(&tree->hash_lock); + hfs_bnode_clear(node, 0, tree->node_size); hfs_bmap_free(node); hfs_bnode_free(node); return; diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c index 2fa4b1f8cc7f..e86e1e235658 100644 --- a/fs/hfs/btree.c +++ b/fs/hfs/btree.c @@ -21,8 +21,12 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke struct hfs_btree *tree; struct hfs_btree_header_rec *head; struct address_space *mapping; - struct page *page; + struct folio *folio; + struct buffer_head *bh; unsigned int size; + u16 dblock; + sector_t start_block; + loff_t offset;
tree = kzalloc(sizeof(*tree), GFP_KERNEL); if (!tree) @@ -75,12 +79,40 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke unlock_new_inode(tree->inode);
mapping = tree->inode->i_mapping; - page = read_mapping_page(mapping, 0, NULL); - if (IS_ERR(page)) + folio = filemap_grab_folio(mapping, 0); + if (IS_ERR(folio)) goto free_inode;
+ folio_zero_range(folio, 0, folio_size(folio)); + + dblock = hfs_ext_find_block(HFS_I(tree->inode)->first_extents, 0); + start_block = HFS_SB(sb)->fs_start + (dblock * HFS_SB(sb)->fs_div); + + size = folio_size(folio); + offset = 0; + while (size > 0) { + size_t len; + + bh = sb_bread(sb, start_block); + if (!bh) { + pr_err("unable to read tree header\n"); + goto put_folio; + } + + len = min_t(size_t, folio_size(folio), sb->s_blocksize); + memcpy_to_folio(folio, offset, bh->b_data, sb->s_blocksize); + + brelse(bh); + + start_block++; + offset += len; + size -= len; + } + + folio_mark_uptodate(folio); + /* Load the header */ - head = (struct hfs_btree_header_rec *)(kmap_local_page(page) + + head = (struct hfs_btree_header_rec *)(kmap_local_folio(folio, 0) + sizeof(struct hfs_bnode_desc)); tree->root = be32_to_cpu(head->root); tree->leaf_count = be32_to_cpu(head->leaf_count); @@ -95,22 +127,22 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
size = tree->node_size; if (!is_power_of_2(size)) - goto fail_page; + goto fail_folio; if (!tree->node_count) - goto fail_page; + goto fail_folio; switch (id) { case HFS_EXT_CNID: if (tree->max_key_len != HFS_MAX_EXT_KEYLEN) { pr_err("invalid extent max_key_len %d\n", tree->max_key_len); - goto fail_page; + goto fail_folio; } break; case HFS_CAT_CNID: if (tree->max_key_len != HFS_MAX_CAT_KEYLEN) { pr_err("invalid catalog max_key_len %d\n", tree->max_key_len); - goto fail_page; + goto fail_folio; } break; default: @@ -121,12 +153,15 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
kunmap_local(head); - put_page(page); + folio_unlock(folio); + folio_put(folio); return tree;
-fail_page: +fail_folio: kunmap_local(head); - put_page(page); +put_folio: + folio_unlock(folio); + folio_put(folio); free_inode: tree->inode->i_mapping->a_ops = &hfs_aops; iput(tree->inode); diff --git a/fs/hfs/extent.c b/fs/hfs/extent.c index 6d1878b99b30..941c92525815 100644 --- a/fs/hfs/extent.c +++ b/fs/hfs/extent.c @@ -71,7 +71,7 @@ int hfs_ext_keycmp(const btree_key *key1, const btree_key *key2) * * Find a block within an extent record */ -static u16 hfs_ext_find_block(struct hfs_extent *ext, u16 off) +u16 hfs_ext_find_block(struct hfs_extent *ext, u16 off) { int i; u16 count; diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h index 49d02524e667..f1402d71b092 100644 --- a/fs/hfs/hfs_fs.h +++ b/fs/hfs/hfs_fs.h @@ -190,6 +190,7 @@ extern const struct inode_operations hfs_dir_inode_operations;
/* extent.c */ extern int hfs_ext_keycmp(const btree_key *, const btree_key *); +extern u16 hfs_ext_find_block(struct hfs_extent *ext, u16 off); extern int hfs_free_fork(struct super_block *, struct hfs_cat_file *, int); extern int hfs_ext_write_extent(struct inode *); extern int hfs_extend_file(struct inode *); diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c index 079ea80534f7..14f4995588ff 100644 --- a/fs/hfsplus/bnode.c +++ b/fs/hfsplus/bnode.c @@ -18,12 +18,68 @@ #include "hfsplus_fs.h" #include "hfsplus_raw.h"
+static inline +bool is_bnode_offset_valid(struct hfs_bnode *node, int off) +{ + bool is_valid = off < node->tree->node_size; + + if (!is_valid) { + pr_err("requested invalid offset: " + "NODE: id %u, type %#x, height %u, " + "node_size %u, offset %d\n", + node->this, node->type, node->height, + node->tree->node_size, off); + } + + return is_valid; +} + +static inline +int check_and_correct_requested_length(struct hfs_bnode *node, int off, int len) +{ + unsigned int node_size; + + if (!is_bnode_offset_valid(node, off)) + return 0; + + node_size = node->tree->node_size; + + if ((off + len) > node_size) { + int new_len = (int)node_size - off; + + pr_err("requested length has been corrected: " + "NODE: id %u, type %#x, height %u, " + "node_size %u, offset %d, " + "requested_len %d, corrected_len %d\n", + node->this, node->type, node->height, + node->tree->node_size, off, len, new_len); + + return new_len; + } + + return len; +} + /* Copy a specified range of bytes from the raw data of a node */ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len) { struct page **pagep; int l;
+ if (!is_bnode_offset_valid(node, off)) + return; + + if (len == 0) { + pr_err("requested zero length: " + "NODE: id %u, type %#x, height %u, " + "node_size %u, offset %d, len %d\n", + node->this, node->type, node->height, + node->tree->node_size, off, len); + return; + } + + len = check_and_correct_requested_length(node, off, len); + off += node->page_offset; pagep = node->page + (off >> PAGE_SHIFT); off &= ~PAGE_MASK; @@ -81,6 +137,20 @@ void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len) struct page **pagep; int l;
+ if (!is_bnode_offset_valid(node, off)) + return; + + if (len == 0) { + pr_err("requested zero length: " + "NODE: id %u, type %#x, height %u, " + "node_size %u, offset %d, len %d\n", + node->this, node->type, node->height, + node->tree->node_size, off, len); + return; + } + + len = check_and_correct_requested_length(node, off, len); + off += node->page_offset; pagep = node->page + (off >> PAGE_SHIFT); off &= ~PAGE_MASK; @@ -109,6 +179,20 @@ void hfs_bnode_clear(struct hfs_bnode *node, int off, int len) struct page **pagep; int l;
+ if (!is_bnode_offset_valid(node, off)) + return; + + if (len == 0) { + pr_err("requested zero length: " + "NODE: id %u, type %#x, height %u, " + "node_size %u, offset %d, len %d\n", + node->this, node->type, node->height, + node->tree->node_size, off, len); + return; + } + + len = check_and_correct_requested_length(node, off, len); + off += node->page_offset; pagep = node->page + (off >> PAGE_SHIFT); off &= ~PAGE_MASK; @@ -133,6 +217,10 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst, hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len); if (!len) return; + + len = check_and_correct_requested_length(src_node, src, len); + len = check_and_correct_requested_length(dst_node, dst, len); + src += src_node->page_offset; dst += dst_node->page_offset; src_page = src_node->page + (src >> PAGE_SHIFT); @@ -187,6 +275,10 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len) hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len); if (!len) return; + + len = check_and_correct_requested_length(node, src, len); + len = check_and_correct_requested_length(node, dst, len); + src += node->page_offset; dst += node->page_offset; if (dst > src) { diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c index 73342c925a4b..36b6cf2a3abb 100644 --- a/fs/hfsplus/unicode.c +++ b/fs/hfsplus/unicode.c @@ -132,7 +132,14 @@ int hfsplus_uni2asc(struct super_block *sb,
op = astr; ip = ustr->unicode; + ustrlen = be16_to_cpu(ustr->length); + if (ustrlen > HFSPLUS_MAX_STRLEN) { + ustrlen = HFSPLUS_MAX_STRLEN; + pr_err("invalid length %u has been corrected to %d\n", + be16_to_cpu(ustr->length), ustrlen); + } + len = *len_p; ce1 = NULL; compose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags); diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c index f7f9d0889df3..d5fd8e068486 100644 --- a/fs/hfsplus/xattr.c +++ b/fs/hfsplus/xattr.c @@ -172,7 +172,11 @@ static int hfsplus_create_attributes_file(struct super_block *sb) return PTR_ERR(attr_file); }
- BUG_ON(i_size_read(attr_file) != 0); + if (i_size_read(attr_file) != 0) { + err = -EIO; + pr_err("detected inconsistent attributes file, running fsck.hfsplus is recommended.\n"); + goto end_attr_file_creation; + }
hip = HFSPLUS_I(attr_file);
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index ac519515ef6c..ab951fd47531 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -136,7 +136,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND); vma->vm_ops = &hugetlb_vm_ops;
- ret = seal_check_future_write(info->seals, vma); + ret = seal_check_write(info->seals, vma); if (ret) return ret;
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c index 8fda66c98a61..368ae50d8a59 100644 --- a/fs/jbd2/checkpoint.c +++ b/fs/jbd2/checkpoint.c @@ -285,6 +285,7 @@ int jbd2_log_do_checkpoint(journal_t *journal) retry: if (batch_count) __flush_batch(journal, &batch_count); + cond_resched(); spin_lock(&journal->j_list_lock); goto restart; } diff --git a/fs/jfs/file.c b/fs/jfs/file.c index 01b6912e60f8..742cadd1f37e 100644 --- a/fs/jfs/file.c +++ b/fs/jfs/file.c @@ -44,6 +44,9 @@ static int jfs_open(struct inode *inode, struct file *file) { int rc;
+ if (S_ISREG(inode->i_mode) && inode->i_size < 0) + return -EIO; + if ((rc = dquot_file_open(inode, file))) return rc;
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index 920d58a1566b..66c38ef5e571 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c @@ -145,9 +145,9 @@ void jfs_evict_inode(struct inode *inode) if (!inode->i_nlink && !is_bad_inode(inode)) { dquot_initialize(inode);
+ truncate_inode_pages_final(&inode->i_data); if (JFS_IP(inode)->fileset == FILESYSTEM_I) { struct inode *ipimap = JFS_SBI(inode->i_sb)->ipimap; - truncate_inode_pages_final(&inode->i_data);
if (test_cflag(COMMIT_Freewmap, inode)) jfs_free_zero_link(inode); diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c index 5a877261c3fe..cdfa699cd7c8 100644 --- a/fs/jfs/jfs_dmap.c +++ b/fs/jfs/jfs_dmap.c @@ -1389,6 +1389,12 @@ dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results) (1 << (L2LPERCTL - (bmp->db_agheight << 1))) / bmp->db_agwidth; ti = bmp->db_agstart + bmp->db_agwidth * (agno & (agperlev - 1));
+ if (ti < 0 || ti >= le32_to_cpu(dcp->nleafs)) { + jfs_error(bmp->db_ipbmap->i_sb, "Corrupt dmapctl page\n"); + release_metapage(mp); + return -EIO; + } + /* dmap control page trees fan-out by 4 and a single allocation * group may be described by 1 or 2 subtrees within the ag level * dmap control page, depending upon the ag size. examine the ag's diff --git a/fs/libfs.c b/fs/libfs.c index f5566964aa7d..b913ab238cc1 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -610,7 +610,7 @@ void simple_recursive_removal(struct dentry *dentry, struct dentry *victim = NULL, *child; struct inode *inode = this->d_inode;
- inode_lock(inode); + inode_lock_nested(inode, I_MUTEX_CHILD); if (d_is_dir(this)) inode->i_flags |= S_DEAD; while ((child = find_next_child(this, victim)) == NULL) { @@ -622,7 +622,7 @@ void simple_recursive_removal(struct dentry *dentry, victim = this; this = this->d_parent; inode = this->d_inode; - inode_lock(inode); + inode_lock_nested(inode, I_MUTEX_CHILD); if (simple_positive(victim)) { d_invalidate(victim); // avoid lost mounts if (d_is_dir(victim)) diff --git a/fs/namespace.c b/fs/namespace.c index 6a9c53c800c4..f79226472251 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -2526,6 +2526,19 @@ static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp) return attach_recursive_mnt(mnt, p, mp, 0); }
+static int may_change_propagation(const struct mount *m) +{ + struct mnt_namespace *ns = m->mnt_ns; + + // it must be mounted in some namespace + if (IS_ERR_OR_NULL(ns)) // is_mounted() + return -EINVAL; + // and the caller must be admin in userns of that namespace + if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN)) + return -EPERM; + return 0; +} + /* * Sanity check the flags to change_mnt_propagation. */ @@ -2562,10 +2575,10 @@ static int do_change_type(struct path *path, int ms_flags) return -EINVAL;
namespace_lock(); - if (!check_mnt(mnt)) { - err = -EINVAL; + err = may_change_propagation(mnt); + if (err) goto out_unlock; - } + if (type == MS_SHARED) { err = invent_group_ids(mnt, recurse); if (err) @@ -2960,18 +2973,11 @@ static int do_set_group(struct path *from_path, struct path *to_path)
namespace_lock();
- err = -EINVAL; - /* To and From must be mounted */ - if (!is_mounted(&from->mnt)) - goto out; - if (!is_mounted(&to->mnt)) - goto out; - - err = -EPERM; - /* We should be allowed to modify mount namespaces of both mounts */ - if (!ns_capable(from->mnt_ns->user_ns, CAP_SYS_ADMIN)) + err = may_change_propagation(from); + if (err) goto out; - if (!ns_capable(to->mnt_ns->user_ns, CAP_SYS_ADMIN)) + err = may_change_propagation(to); + if (err) goto out;
err = -EINVAL; diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index 6be13e0ec170..e498aade8c47 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c @@ -149,8 +149,8 @@ do_add_page_to_bio(struct bio *bio, int npg, enum req_op op, sector_t isect,
/* limit length to what the device mapping allows */ end = disk_addr + *len; - if (end >= map->start + map->len) - *len = map->start + map->len - disk_addr; + if (end >= map->disk_offset + map->len) + *len = map->disk_offset + map->len - disk_addr;
retry: if (!bio) { diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c index 65cbb5607a5f..61ee0b6c0fba 100644 --- a/fs/nfs/blocklayout/dev.c +++ b/fs/nfs/blocklayout/dev.c @@ -199,10 +199,11 @@ static bool bl_map_stripe(struct pnfs_block_dev *dev, u64 offset, struct pnfs_block_dev *child; u64 chunk; u32 chunk_idx; + u64 disk_chunk; u64 disk_offset;
chunk = div_u64(offset, dev->chunk_size); - div_u64_rem(chunk, dev->nr_children, &chunk_idx); + disk_chunk = div_u64_rem(chunk, dev->nr_children, &chunk_idx);
if (chunk_idx >= dev->nr_children) { dprintk("%s: invalid chunk idx %d (%lld/%lld)\n", @@ -215,7 +216,7 @@ static bool bl_map_stripe(struct pnfs_block_dev *dev, u64 offset, offset = chunk * dev->chunk_size;
/* disk offset of the stripe */ - disk_offset = div_u64(offset, dev->nr_children); + disk_offset = disk_chunk * dev->chunk_size;
child = &dev->children[chunk_idx]; child->map(child, disk_offset, map); diff --git a/fs/nfs/blocklayout/extent_tree.c b/fs/nfs/blocklayout/extent_tree.c index 8f7cff7a4293..0add0f329816 100644 --- a/fs/nfs/blocklayout/extent_tree.c +++ b/fs/nfs/blocklayout/extent_tree.c @@ -552,6 +552,15 @@ static int ext_tree_encode_commit(struct pnfs_block_layout *bl, __be32 *p, return ret; }
+/** + * ext_tree_prepare_commit - encode extents that need to be committed + * @arg: layout commit data + * + * Return values: + * %0: Success, all required extents are encoded + * %-ENOSPC: Some extents are encoded, but not all, due to RPC size limit + * %-ENOMEM: Out of memory, extents not encoded + */ int ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg) { @@ -568,12 +577,12 @@ ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg) start_p = page_address(arg->layoutupdate_page); arg->layoutupdate_pages = &arg->layoutupdate_page;
-retry: - ret = ext_tree_encode_commit(bl, start_p + 1, buffer_size, &count, &arg->lastbytewritten); + ret = ext_tree_encode_commit(bl, start_p + 1, buffer_size, + &count, &arg->lastbytewritten); if (unlikely(ret)) { ext_tree_free_commitdata(arg, buffer_size);
- buffer_size = ext_tree_layoutupdate_size(bl, count); + buffer_size = NFS_SERVER(arg->inode)->wsize; count = 0;
arg->layoutupdate_pages = @@ -588,7 +597,8 @@ ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg) return -ENOMEM; }
- goto retry; + ret = ext_tree_encode_commit(bl, start_p + 1, buffer_size, + &count, &arg->lastbytewritten); }
*start_p = cpu_to_be32(count); @@ -608,7 +618,7 @@ ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg) }
dprintk("%s found %zu ranges\n", __func__, count); - return 0; + return ret; }
void diff --git a/fs/nfs/client.c b/fs/nfs/client.c index aa09f930eeaf..cc764da581c4 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -668,6 +668,44 @@ struct nfs_client *nfs_init_client(struct nfs_client *clp, } EXPORT_SYMBOL_GPL(nfs_init_client);
+static void nfs4_server_set_init_caps(struct nfs_server *server) +{ +#if IS_ENABLED(CONFIG_NFS_V4) + /* Set the basic capabilities */ + server->caps = server->nfs_client->cl_mvops->init_caps; + if (server->flags & NFS_MOUNT_NORDIRPLUS) + server->caps &= ~NFS_CAP_READDIRPLUS; + if (server->nfs_client->cl_proto == XPRT_TRANSPORT_RDMA) + server->caps &= ~NFS_CAP_READ_PLUS; + + /* + * Don't use NFS uid/gid mapping if we're using AUTH_SYS or lower + * authentication. + */ + if (nfs4_disable_idmapping && + server->client->cl_auth->au_flavor == RPC_AUTH_UNIX) + server->caps |= NFS_CAP_UIDGID_NOMAP; +#endif +} + +void nfs_server_set_init_caps(struct nfs_server *server) +{ + switch (server->nfs_client->rpc_ops->version) { + case 2: + server->caps = NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS; + break; + case 3: + server->caps = NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS; + if (!(server->flags & NFS_MOUNT_NORDIRPLUS)) + server->caps |= NFS_CAP_READDIRPLUS; + break; + default: + nfs4_server_set_init_caps(server); + break; + } +} +EXPORT_SYMBOL_GPL(nfs_server_set_init_caps); + /* * Create a version 2 or 3 client */ @@ -709,7 +747,6 @@ static int nfs_init_server(struct nfs_server *server, /* Initialise the client representation from the mount data */ server->flags = ctx->flags; server->options = ctx->options; - server->caps |= NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS;
switch (clp->rpc_ops->version) { case 2: @@ -745,6 +782,8 @@ static int nfs_init_server(struct nfs_server *server, if (error < 0) goto error;
+ nfs_server_set_init_caps(server); + /* Preserve the values of mount_server-related mount options */ if (ctx->mount_server.addrlen) { memcpy(&server->mountd_address, &ctx->mount_server.address, @@ -919,7 +958,6 @@ void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_server *sour target->acregmax = source->acregmax; target->acdirmin = source->acdirmin; target->acdirmax = source->acdirmax; - target->caps = source->caps; target->options = source->options; target->auth_info = source->auth_info; target->port = source->port; @@ -1145,6 +1183,8 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source, if (error < 0) goto out_free_server;
+ nfs_server_set_init_caps(server); + /* probe the filesystem info for this server filesystem */ error = nfs_probe_server(server, fh); if (error < 0) diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 8870c72416ac..4eea91d054b2 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -223,7 +223,7 @@ extern struct nfs_client * nfs4_find_client_sessionid(struct net *, const struct sockaddr *, struct nfs4_sessionid *, u32); extern struct nfs_server *nfs_create_server(struct fs_context *); -extern void nfs4_server_set_init_caps(struct nfs_server *); +extern void nfs_server_set_init_caps(struct nfs_server *); extern struct nfs_server *nfs4_create_server(struct fs_context *); extern struct nfs_server *nfs4_create_referral_server(struct fs_context *); extern int nfs4_update_server(struct nfs_server *server, const char *hostname, diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index ac80f87cb9d9..f6dc42de48f0 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -1079,24 +1079,6 @@ static void nfs4_session_limit_xasize(struct nfs_server *server) #endif }
-void nfs4_server_set_init_caps(struct nfs_server *server) -{ - /* Set the basic capabilities */ - server->caps |= server->nfs_client->cl_mvops->init_caps; - if (server->flags & NFS_MOUNT_NORDIRPLUS) - server->caps &= ~NFS_CAP_READDIRPLUS; - if (server->nfs_client->cl_proto == XPRT_TRANSPORT_RDMA) - server->caps &= ~NFS_CAP_READ_PLUS; - - /* - * Don't use NFS uid/gid mapping if we're using AUTH_SYS or lower - * authentication. - */ - if (nfs4_disable_idmapping && - server->client->cl_auth->au_flavor == RPC_AUTH_UNIX) - server->caps |= NFS_CAP_UIDGID_NOMAP; -} - static int nfs4_server_common_setup(struct nfs_server *server, struct nfs_fh *mntfh, bool auth_probe) { @@ -1111,7 +1093,7 @@ static int nfs4_server_common_setup(struct nfs_server *server, if (error < 0) goto out;
- nfs4_server_set_init_caps(server); + nfs_server_set_init_caps(server);
/* Probe the root fh to retrieve its FSID and filehandle */ error = nfs4_get_rootfh(server, mntfh, auth_probe); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 89d88d37e0cc..6debcfc63222 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -3951,7 +3951,7 @@ int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) }; int err;
- nfs4_server_set_init_caps(server); + nfs_server_set_init_caps(server); do { err = nfs4_handle_exception(server, _nfs4_server_capabilities(server, fhandle), diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 79d1ffdcbebd..b40c20bd364b 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -3216,6 +3216,7 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync) struct nfs_inode *nfsi = NFS_I(inode); loff_t end_pos; int status; + bool mark_as_dirty = false;
if (!pnfs_layoutcommit_outstanding(inode)) return 0; @@ -3267,19 +3268,23 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync) if (ld->prepare_layoutcommit) { status = ld->prepare_layoutcommit(&data->args); if (status) { - put_cred(data->cred); + if (status != -ENOSPC) + put_cred(data->cred); spin_lock(&inode->i_lock); set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags); if (end_pos > nfsi->layout->plh_lwb) nfsi->layout->plh_lwb = end_pos; - goto out_unlock; + if (status != -ENOSPC) + goto out_unlock; + spin_unlock(&inode->i_lock); + mark_as_dirty = true; } }
status = nfs4_proc_layoutcommit(data, sync); out: - if (status) + if (status || mark_as_dirty) mark_inode_dirty_sync(inode); dprintk("<-- %s status %d\n", __func__, status); return status; diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index e2875706e6bf..4aeb08040f3e 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -4282,10 +4282,16 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp, } status = nfs_ok; if (conf) { - old = unconf; - unhash_client_locked(old); - nfsd4_change_callback(conf, &unconf->cl_cb_conn); - } else { + if (get_client_locked(conf) == nfs_ok) { + old = unconf; + unhash_client_locked(old); + nfsd4_change_callback(conf, &unconf->cl_cb_conn); + } else { + conf = NULL; + } + } + + if (!conf) { old = find_confirmed_client_by_name(&unconf->cl_name, nn); if (old) { status = nfserr_clid_inuse; @@ -4302,10 +4308,14 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp, } trace_nfsd_clid_replaced(&old->cl_clientid); } + status = get_client_locked(unconf); + if (status != nfs_ok) { + old = NULL; + goto out; + } move_to_confirmed(unconf); conf = unconf; } - get_client_locked(conf); spin_unlock(&nn->client_lock); if (conf == unconf) fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY); @@ -5765,6 +5775,20 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf status = nfs4_check_deleg(cl, open, &dp); if (status) goto out; + if (dp && nfsd4_is_deleg_cur(open) && + (dp->dl_stid.sc_file != fp)) { + /* + * RFC8881 section 8.2.4 mandates the server to return + * NFS4ERR_BAD_STATEID if the selected table entry does + * not match the current filehandle. However returning + * NFS4ERR_BAD_STATEID in the OPEN can cause the client + * to repeatedly retry the operation with the same + * stateid, since the stateid itself is valid. To avoid + * this situation NFSD returns NFS4ERR_INVAL instead. + */ + status = nfserr_inval; + goto out; + } stp = nfsd4_find_and_lock_existing_open(fp, open); } else { open->op_file = NULL; diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c index e1b856ecce61..6b93c909bdc9 100644 --- a/fs/ntfs3/dir.c +++ b/fs/ntfs3/dir.c @@ -304,6 +304,9 @@ static inline bool ntfs_dir_emit(struct ntfs_sb_info *sbi, if (sbi->options->nohidden && (fname->dup.fa & FILE_ATTRIBUTE_HIDDEN)) return true;
+ if (fname->name_len + sizeof(struct NTFS_DE) > le16_to_cpu(e->size)) + return true; + name_len = ntfs_utf16_to_nls(sbi, fname->name, fname->name_len, name, PATH_MAX); if (name_len <= 0) { diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c index af7c0cbba74e..0150a2210209 100644 --- a/fs/ntfs3/inode.c +++ b/fs/ntfs3/inode.c @@ -1130,10 +1130,10 @@ int inode_write_data(struct inode *inode, const void *data, size_t bytes) * Number of bytes for REPARSE_DATA_BUFFER(IO_REPARSE_TAG_SYMLINK) * for unicode string of @uni_len length. */ -static inline u32 ntfs_reparse_bytes(u32 uni_len) +static inline u32 ntfs_reparse_bytes(u32 uni_len, bool is_absolute) { /* Header + unicode string + decorated unicode string. */ - return sizeof(short) * (2 * uni_len + 4) + + return sizeof(short) * (2 * uni_len + (is_absolute ? 4 : 0)) + offsetof(struct REPARSE_DATA_BUFFER, SymbolicLinkReparseBuffer.PathBuffer); } @@ -1146,8 +1146,11 @@ ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname, struct REPARSE_DATA_BUFFER *rp; __le16 *rp_name; typeof(rp->SymbolicLinkReparseBuffer) *rs; + bool is_absolute;
- rp = kzalloc(ntfs_reparse_bytes(2 * size + 2), GFP_NOFS); + is_absolute = (strlen(symname) > 1 && symname[1] == ':'); + + rp = kzalloc(ntfs_reparse_bytes(2 * size + 2, is_absolute), GFP_NOFS); if (!rp) return ERR_PTR(-ENOMEM);
@@ -1162,7 +1165,7 @@ ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname, goto out;
/* err = the length of unicode name of symlink. */ - *nsize = ntfs_reparse_bytes(err); + *nsize = ntfs_reparse_bytes(err, is_absolute);
if (*nsize > sbi->reparse.max_size) { err = -EFBIG; @@ -1182,7 +1185,7 @@ ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname,
/* PrintName + SubstituteName. */ rs->SubstituteNameOffset = cpu_to_le16(sizeof(short) * err); - rs->SubstituteNameLength = cpu_to_le16(sizeof(short) * err + 8); + rs->SubstituteNameLength = cpu_to_le16(sizeof(short) * err + (is_absolute ? 8 : 0)); rs->PrintNameLength = rs->SubstituteNameOffset;
/* @@ -1190,16 +1193,18 @@ ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname, * parse this path. * 0-absolute path 1- relative path (SYMLINK_FLAG_RELATIVE). */ - rs->Flags = 0; + rs->Flags = cpu_to_le32(is_absolute ? 0 : SYMLINK_FLAG_RELATIVE);
- memmove(rp_name + err + 4, rp_name, sizeof(short) * err); + memmove(rp_name + err + (is_absolute ? 4 : 0), rp_name, sizeof(short) * err);
- /* Decorate SubstituteName. */ - rp_name += err; - rp_name[0] = cpu_to_le16('\\'); - rp_name[1] = cpu_to_le16('?'); - rp_name[2] = cpu_to_le16('?'); - rp_name[3] = cpu_to_le16('\\'); + if (is_absolute) { + /* Decorate SubstituteName. */ + rp_name += err; + rp_name[0] = cpu_to_le16('\\'); + rp_name[1] = cpu_to_le16('?'); + rp_name[2] = cpu_to_le16('?'); + rp_name[3] = cpu_to_le16('\\'); + }
return rp; out: diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c index b57140ebfad0..cd4bfd92ebd6 100644 --- a/fs/orangefs/orangefs-debugfs.c +++ b/fs/orangefs/orangefs-debugfs.c @@ -354,7 +354,7 @@ static ssize_t orangefs_debug_read(struct file *file, goto out;
mutex_lock(&orangefs_debug_lock); - sprintf_ret = sprintf(buf, "%s", (char *)file->private_data); + sprintf_ret = scnprintf(buf, ORANGEFS_MAX_DEBUG_STRING_LEN, "%s", (char *)file->private_data); mutex_unlock(&orangefs_debug_lock);
read_ret = simple_read_from_buffer(ubuf, count, ppos, buf, sprintf_ret); diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c index 81d425f571e2..91f4e50af1e9 100644 --- a/fs/smb/client/cifssmb.c +++ b/fs/smb/client/cifssmb.c @@ -3984,6 +3984,12 @@ CIFSFindFirst(const unsigned int xid, struct cifs_tcon *tcon, pSMB->FileName[name_len] = 0; pSMB->FileName[name_len+1] = 0; name_len += 2; + } else if (!searchName[0]) { + pSMB->FileName[0] = CIFS_DIR_SEP(cifs_sb); + pSMB->FileName[1] = 0; + pSMB->FileName[2] = 0; + pSMB->FileName[3] = 0; + name_len = 4; } } else { name_len = copy_path_name(pSMB->FileName, searchName); @@ -3995,6 +4001,10 @@ CIFSFindFirst(const unsigned int xid, struct cifs_tcon *tcon, pSMB->FileName[name_len] = '*'; pSMB->FileName[name_len+1] = 0; name_len += 2; + } else if (!searchName[0]) { + pSMB->FileName[0] = CIFS_DIR_SEP(cifs_sb); + pSMB->FileName[1] = 0; + name_len = 2; } }
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c index 0588896c4456..986286ef4046 100644 --- a/fs/smb/client/connect.c +++ b/fs/smb/client/connect.c @@ -3162,18 +3162,15 @@ generic_ip_connect(struct TCP_Server_Info *server) struct net *net = cifs_net_ns(server); struct sock *sk;
- rc = __sock_create(net, sfamily, SOCK_STREAM, - IPPROTO_TCP, &server->ssocket, 1); + rc = sock_create_kern(net, sfamily, SOCK_STREAM, + IPPROTO_TCP, &server->ssocket); if (rc < 0) { cifs_server_dbg(VFS, "Error %d creating socket\n", rc); return rc; }
sk = server->ssocket->sk; - __netns_tracker_free(net, &sk->ns_tracker, false); - sk->sk_net_refcnt = 1; - get_net_track(net, &sk->ns_tracker, GFP_KERNEL); - sock_inuse_add(net, 1); + sk_net_refcnt_upgrade(sk);
/* BB other socket options to set KEEPALIVE, NODELAY? */ cifs_dbg(FYI, "Socket created\n"); @@ -3998,7 +3995,6 @@ cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses, return 0; }
- server->lstrp = jiffies; server->tcpStatus = CifsInNegotiate; server->neg_start = jiffies; spin_unlock(&server->srv_lock); diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c index c351da8c3e2e..bbde7180a90a 100644 --- a/fs/smb/client/sess.c +++ b/fs/smb/client/sess.c @@ -372,6 +372,7 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server) struct cifs_server_iface *old_iface = NULL; struct cifs_server_iface *last_iface = NULL; struct sockaddr_storage ss; + int retry = 0;
spin_lock(&ses->chan_lock); chan_index = cifs_ses_get_chan_index(ses, server); @@ -400,6 +401,7 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server) return; }
+try_again: last_iface = list_last_entry(&ses->iface_list, struct cifs_server_iface, iface_head); iface_min_speed = last_iface->speed; @@ -437,6 +439,13 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server) }
if (list_entry_is_head(iface, &ses->iface_list, iface_head)) { + list_for_each_entry(iface, &ses->iface_list, iface_head) + iface->weight_fulfilled = 0; + + /* see if it can be satisfied in second attempt */ + if (!retry++) + goto try_again; + iface = NULL; cifs_dbg(FYI, "unable to find a suitable iface\n"); } diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c index d0734aa1961a..b74f76904739 100644 --- a/fs/smb/client/smb2ops.c +++ b/fs/smb/client/smb2ops.c @@ -730,6 +730,13 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf, bytes_left -= sizeof(*p); break; } + /* Validate that Next doesn't point beyond the buffer */ + if (next > bytes_left) { + cifs_dbg(VFS, "%s: invalid Next pointer %zu > %zd\n", + __func__, next, bytes_left); + rc = -EINVAL; + goto out; + } p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next); bytes_left -= next; } @@ -741,7 +748,9 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf, }
/* Azure rounds the buffer size up 8, to a 16 byte boundary */ - if ((bytes_left > 8) || p->Next) + if ((bytes_left > 8) || + (bytes_left >= offsetof(struct network_interface_info_ioctl_rsp, Next) + + sizeof(p->Next) && p->Next)) cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
ses->iface_last_update = jiffies; diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c index 48d020e1f663..713bd1dcd39c 100644 --- a/fs/smb/client/smbdirect.c +++ b/fs/smb/client/smbdirect.c @@ -282,18 +282,20 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc) log_rdma_send(INFO, "smbd_request 0x%p completed wc->status=%d\n", request, wc->status);
- if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) { - log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n", - wc->status, wc->opcode); - smbd_disconnect_rdma_connection(request->info); - } - for (i = 0; i < request->num_sge; i++) ib_dma_unmap_single(sc->ib.dev, request->sge[i].addr, request->sge[i].length, DMA_TO_DEVICE);
+ if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) { + log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n", + wc->status, wc->opcode); + mempool_free(request, info->request_mempool); + smbd_disconnect_rdma_connection(info); + return; + } + if (atomic_dec_and_test(&request->info->send_pending)) wake_up(&request->info->wait_send_pending);
@@ -1336,10 +1338,6 @@ void smbd_destroy(struct TCP_Server_Info *server) log_rdma_event(INFO, "cancelling idle timer\n"); cancel_delayed_work_sync(&info->idle_timer_work);
- log_rdma_event(INFO, "wait for all send posted to IB to finish\n"); - wait_event(info->wait_send_pending, - atomic_read(&info->send_pending) == 0); - /* It's not possible for upper layer to get to reassembly */ log_rdma_event(INFO, "drain the reassembly queue\n"); do { @@ -2051,7 +2049,11 @@ int smbd_send(struct TCP_Server_Info *server, */
wait_event(info->wait_send_pending, - atomic_read(&info->send_pending) == 0); + atomic_read(&info->send_pending) == 0 || + sc->status != SMBDIRECT_SOCKET_CONNECTED); + + if (sc->status != SMBDIRECT_SOCKET_CONNECTED && rc == 0) + rc = -EAGAIN;
return rc; } diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c index 66b20c3d963e..f5ebc200dd73 100644 --- a/fs/smb/server/connection.c +++ b/fs/smb/server/connection.c @@ -503,7 +503,8 @@ void ksmbd_conn_transport_destroy(void) { mutex_lock(&init_lock); ksmbd_tcp_destroy(); - ksmbd_rdma_destroy(); + ksmbd_rdma_stop_listening(); stop_sessions(); + ksmbd_rdma_destroy(); mutex_unlock(&init_lock); } diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h index c769fe3859b3..29ba91fc5407 100644 --- a/fs/smb/server/connection.h +++ b/fs/smb/server/connection.h @@ -45,7 +45,12 @@ struct ksmbd_conn { struct mutex srv_mutex; int status; unsigned int cli_cap; - __be32 inet_addr; + union { + __be32 inet_addr; +#if IS_ENABLED(CONFIG_IPV6) + u8 inet6_addr[16]; +#endif + }; char *request_buf; struct ksmbd_transport *transport; struct nls_table *local_nls; diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c index e564432643ea..2a3ef29ac0eb 100644 --- a/fs/smb/server/oplock.c +++ b/fs/smb/server/oplock.c @@ -1102,8 +1102,10 @@ void smb_send_parent_lease_break_noti(struct ksmbd_file *fp, if (!atomic_inc_not_zero(&opinfo->refcount)) continue;
- if (ksmbd_conn_releasing(opinfo->conn)) + if (ksmbd_conn_releasing(opinfo->conn)) { + opinfo_put(opinfo); continue; + }
oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE, NULL); opinfo_put(opinfo); @@ -1139,8 +1141,11 @@ void smb_lazy_parent_lease_break_close(struct ksmbd_file *fp) if (!atomic_inc_not_zero(&opinfo->refcount)) continue;
- if (ksmbd_conn_releasing(opinfo->conn)) + if (ksmbd_conn_releasing(opinfo->conn)) { + opinfo_put(opinfo); continue; + } + oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE, NULL); opinfo_put(opinfo); } @@ -1343,8 +1348,10 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp, if (!atomic_inc_not_zero(&brk_op->refcount)) continue;
- if (ksmbd_conn_releasing(brk_op->conn)) + if (ksmbd_conn_releasing(brk_op->conn)) { + opinfo_put(brk_op); continue; + }
if (brk_op->is_lease && (brk_op->o_lease->state & (~(SMB2_LEASE_READ_CACHING_LE | diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index d3dd3b9b4005..85e7bc3a2bd3 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -6011,7 +6011,6 @@ static int smb2_create_link(struct ksmbd_work *work, { char *link_name = NULL, *target_name = NULL, *pathname = NULL; struct path path, parent_path; - bool file_present = false; int rc;
if (buf_len < (u64)sizeof(struct smb2_file_link_info) + @@ -6044,11 +6043,8 @@ static int smb2_create_link(struct ksmbd_work *work, if (rc) { if (rc != -ENOENT) goto out; - } else - file_present = true; - - if (file_info->ReplaceIfExists) { - if (file_present) { + } else { + if (file_info->ReplaceIfExists) { rc = ksmbd_vfs_remove_file(work, &path); if (rc) { rc = -EINVAL; @@ -6056,21 +6052,17 @@ static int smb2_create_link(struct ksmbd_work *work, link_name); goto out; } - } - } else { - if (file_present) { + } else { rc = -EEXIST; ksmbd_debug(SMB, "link already exists\n"); goto out; } + ksmbd_vfs_kern_path_unlock(&parent_path, &path); } - rc = ksmbd_vfs_link(work, target_name, link_name); if (rc) rc = -EINVAL; out: - if (file_present) - ksmbd_vfs_kern_path_unlock(&parent_path, &path);
if (!IS_ERR(link_name)) kfree(link_name); diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c index 6c3a57bff147..a4ff1167c9a1 100644 --- a/fs/smb/server/transport_rdma.c +++ b/fs/smb/server/transport_rdma.c @@ -2193,7 +2193,7 @@ int ksmbd_rdma_init(void) return 0; }
-void ksmbd_rdma_destroy(void) +void ksmbd_rdma_stop_listening(void) { if (!smb_direct_listener.cm_id) return; @@ -2202,7 +2202,10 @@ void ksmbd_rdma_destroy(void) rdma_destroy_id(smb_direct_listener.cm_id);
smb_direct_listener.cm_id = NULL; +}
+void ksmbd_rdma_destroy(void) +{ if (smb_direct_wq) { destroy_workqueue(smb_direct_wq); smb_direct_wq = NULL; diff --git a/fs/smb/server/transport_rdma.h b/fs/smb/server/transport_rdma.h index 77aee4e5c9dc..a2291b77488a 100644 --- a/fs/smb/server/transport_rdma.h +++ b/fs/smb/server/transport_rdma.h @@ -54,13 +54,15 @@ struct smb_direct_data_transfer {
#ifdef CONFIG_SMB_SERVER_SMBDIRECT int ksmbd_rdma_init(void); +void ksmbd_rdma_stop_listening(void); void ksmbd_rdma_destroy(void); bool ksmbd_rdma_capable_netdev(struct net_device *netdev); void init_smbd_max_io_size(unsigned int sz); unsigned int get_smbd_max_read_write_size(void); #else static inline int ksmbd_rdma_init(void) { return 0; } -static inline int ksmbd_rdma_destroy(void) { return 0; } +static inline void ksmbd_rdma_stop_listening(void) { } +static inline void ksmbd_rdma_destroy(void) { } static inline bool ksmbd_rdma_capable_netdev(struct net_device *netdev) { return false; } static inline void init_smbd_max_io_size(unsigned int sz) { } static inline unsigned int get_smbd_max_read_write_size(void) { return 0; } diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c index e86bc4a46068..53c536f2ce9f 100644 --- a/fs/smb/server/transport_tcp.c +++ b/fs/smb/server/transport_tcp.c @@ -87,7 +87,14 @@ static struct tcp_transport *alloc_transport(struct socket *client_sk) return NULL; }
+#if IS_ENABLED(CONFIG_IPV6) + if (client_sk->sk->sk_family == AF_INET6) + memcpy(&conn->inet6_addr, &client_sk->sk->sk_v6_daddr, 16); + else + conn->inet_addr = inet_sk(client_sk->sk)->inet_daddr; +#else conn->inet_addr = inet_sk(client_sk->sk)->inet_daddr; +#endif conn->transport = KSMBD_TRANS(t); KSMBD_TRANS(t)->conn = conn; KSMBD_TRANS(t)->ops = &ksmbd_tcp_transport_ops; @@ -231,7 +238,6 @@ static int ksmbd_kthread_fn(void *p) { struct socket *client_sk = NULL; struct interface *iface = (struct interface *)p; - struct inet_sock *csk_inet; struct ksmbd_conn *conn; int ret;
@@ -254,13 +260,27 @@ static int ksmbd_kthread_fn(void *p) /* * Limits repeated connections from clients with the same IP. */ - csk_inet = inet_sk(client_sk->sk); down_read(&conn_list_lock); list_for_each_entry(conn, &conn_list, conns_list) - if (csk_inet->inet_daddr == conn->inet_addr) { +#if IS_ENABLED(CONFIG_IPV6) + if (client_sk->sk->sk_family == AF_INET6) { + if (memcmp(&client_sk->sk->sk_v6_daddr, + &conn->inet6_addr, 16) == 0) { + ret = -EAGAIN; + break; + } + } else if (inet_sk(client_sk->sk)->inet_daddr == + conn->inet_addr) { + ret = -EAGAIN; + break; + } +#else + if (inet_sk(client_sk->sk)->inet_daddr == + conn->inet_addr) { ret = -EAGAIN; break; } +#endif up_read(&conn_list_lock); if (ret == -EAGAIN) continue; diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c index 3a27d4268b3c..494d21777ed0 100644 --- a/fs/squashfs/super.c +++ b/fs/squashfs/super.c @@ -187,10 +187,15 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc) unsigned short flags; unsigned int fragments; u64 lookup_table_start, xattr_id_table_start, next_table; - int err; + int err, devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE);
TRACE("Entered squashfs_fill_superblock\n");
+ if (!devblksize) { + errorf(fc, "squashfs: unable to set blocksize\n"); + return -EINVAL; + } + sb->s_fs_info = kzalloc(sizeof(*msblk), GFP_KERNEL); if (sb->s_fs_info == NULL) { ERROR("Failed to allocate squashfs_sb_info\n"); @@ -201,12 +206,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
msblk->panic_on_errors = (opts->errors == Opt_errors_panic);
- msblk->devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE); - if (!msblk->devblksize) { - errorf(fc, "squashfs: unable to set blocksize\n"); - return -EINVAL; - } - + msblk->devblksize = devblksize; msblk->devblksize_log2 = ffz(~msblk->devblksize);
mutex_init(&msblk->meta_index_mutex); diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c index 7d389dd5ed51..6b70965063d7 100644 --- a/fs/tracefs/inode.c +++ b/fs/tracefs/inode.c @@ -483,9 +483,20 @@ static int tracefs_d_revalidate(struct dentry *dentry, unsigned int flags) return !(ei && ei->is_freed); }
+static int tracefs_d_delete(const struct dentry *dentry) +{ + /* + * We want to keep eventfs dentries around but not tracefs + * ones. eventfs dentries have content in d_fsdata. + * Use d_fsdata to determine if it's a eventfs dentry or not. + */ + return dentry->d_fsdata == NULL; +} + static const struct dentry_operations tracefs_dentry_operations = { .d_revalidate = tracefs_d_revalidate, .d_release = tracefs_d_release, + .d_delete = tracefs_d_delete, };
static int trace_fill_super(struct super_block *sb, void *data, int silent) diff --git a/fs/udf/super.c b/fs/udf/super.c index 20dff9ed2471..cb13a07a4aa8 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -1409,7 +1409,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block, struct genericPartitionMap *gpm; uint16_t ident; struct buffer_head *bh; - unsigned int table_len; + unsigned int table_len, part_map_count; int ret;
bh = udf_read_tagged(sb, block, block, &ident); @@ -1430,7 +1430,16 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block, "logical volume"); if (ret) goto out_bh; - ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps)); + + part_map_count = le32_to_cpu(lvd->numPartitionMaps); + if (part_map_count > table_len / sizeof(struct genericPartitionMap1)) { + udf_err(sb, "error loading logical volume descriptor: " + "Too many partition maps (%u > %u)\n", part_map_count, + table_len / (unsigned)sizeof(struct genericPartitionMap1)); + ret = -EIO; + goto out_bh; + } + ret = udf_sb_alloc_partition_maps(sb, part_map_count); if (ret) goto out_bh;
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index f5377ba5967a..68af6ae3b5d5 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c @@ -422,11 +422,15 @@ xfs_inumbers( .breq = breq, }; struct xfs_trans *tp; + unsigned int iwalk_flags = 0; int error = 0;
if (xfs_bulkstat_already_done(breq->mp, breq->startino)) return 0;
+ if (breq->flags & XFS_IBULK_SAME_AG) + iwalk_flags |= XFS_IWALK_SAME_AG; + /* * Grab an empty transaction so that we can use its recursive buffer * locking abilities to detect cycles in the inobt without deadlocking. @@ -435,7 +439,7 @@ xfs_inumbers( if (error) goto out;
- error = xfs_inobt_walk(breq->mp, tp, breq->startino, breq->flags, + error = xfs_inobt_walk(breq->mp, tp, breq->startino, iwalk_flags, xfs_inumbers_walk, breq->icount, &ic); xfs_trans_cancel(tp); out: diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h index a07b510e7dc5..a63d61ca55af 100644 --- a/include/linux/arch_topology.h +++ b/include/linux/arch_topology.h @@ -27,6 +27,13 @@ static inline unsigned long topology_get_cpu_scale(int cpu)
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
+DECLARE_PER_CPU(unsigned long, capacity_freq_ref); + +static inline unsigned long topology_get_freq_ref(int cpu) +{ + return per_cpu(capacity_freq_ref, cpu); +} + DECLARE_PER_CPU(unsigned long, arch_freq_scale);
static inline unsigned long topology_get_freq_scale(int cpu) @@ -92,6 +99,7 @@ void update_siblings_masks(unsigned int cpu); void remove_cpu_topology(unsigned int cpuid); void reset_cpu_topology(void); int parse_acpi_topology(void); +void freq_inv_set_max_ratio(int cpu, u64 max_rate); #endif
#endif /* _LINUX_ARCH_TOPOLOGY_H_ */ diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 92c8997b1938..b9c0b3281ace 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -379,6 +379,8 @@ enum req_op { REQ_OP_DISCARD = (__force blk_opf_t)3, /* securely erase sectors */ REQ_OP_SECURE_ERASE = (__force blk_opf_t)5, + /* write data at the current zone write pointer */ + REQ_OP_ZONE_APPEND = (__force blk_opf_t)7, /* write the zero filled sector many times */ REQ_OP_WRITE_ZEROES = (__force blk_opf_t)9, /* Open a zone */ @@ -386,9 +388,7 @@ enum req_op { /* Close a zone */ REQ_OP_ZONE_CLOSE = (__force blk_opf_t)11, /* Transition a zone to full */ - REQ_OP_ZONE_FINISH = (__force blk_opf_t)12, - /* write data at the current zone write pointer */ - REQ_OP_ZONE_APPEND = (__force blk_opf_t)13, + REQ_OP_ZONE_FINISH = (__force blk_opf_t)13, /* reset a zone write pointer */ REQ_OP_ZONE_RESET = (__force blk_opf_t)15, /* reset all the zone present on the device */ diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 5a4054f17cbc..e84ed3a43f1f 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -234,14 +234,6 @@ static inline void *offset_to_ptr(const int *off) #define __ADDRESSABLE(sym) \ ___ADDRESSABLE(sym, __section(".discard.addressable"))
-#define __ADDRESSABLE_ASM(sym) \ - .pushsection .discard.addressable,"aw"; \ - .align ARCH_SEL(8,4); \ - ARCH_SEL(.quad, .long) __stringify(sym); \ - .popsection; - -#define __ADDRESSABLE_ASM_STR(sym) __stringify(__ADDRESSABLE_ASM(sym)) - /* &a[0] degrades to a pointer: a different type from an array */ #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
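One reason the REQ_OP_ZONE_APPEND/REQ_OP_ZONE_FINISH values in the blk_types.h hunk above cannot be picked arbitrarily is that the block layer encodes the data direction in the low bit of the opcode: op_is_write() is simply the test of bit 0, so operations that transfer data to, or modify, the device need odd numbers. A small standalone sketch of that convention (only a subset of opcodes, values matching the patched header):

#include <stdbool.h>
#include <stdio.h>

/*
 * Subset of the request opcodes after the renumbering above. The low bit
 * of each value tells the block layer whether the operation writes to the
 * device, which is why the numeric values matter.
 */
enum req_op {
	REQ_OP_READ		= 0,
	REQ_OP_WRITE		= 1,
	REQ_OP_ZONE_APPEND	= 7,	/* data written at the zone write pointer */
	REQ_OP_ZONE_FINISH	= 13,	/* modifies the zone state, no payload */
	REQ_OP_ZONE_RESET	= 15,
};

/* Same test as the kernel's op_is_write(): bit 0 of the opcode. */
static bool op_is_write(enum req_op op)
{
	return op & 1;
}

int main(void)
{
	printf("ZONE_APPEND is a write op: %d\n", op_is_write(REQ_OP_ZONE_APPEND));
	printf("READ is a write op:        %d\n", op_is_write(REQ_OP_READ));
	return 0;
}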
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 184a84dd467e..bfecd9dcb552 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -1245,6 +1245,7 @@ void arch_set_freq_scale(const struct cpumask *cpus, { } #endif + /* the following are really really optional */ extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs; diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index adec808b371a..88d91e087471 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -224,7 +224,7 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd, unsigned long max_util, unsigned long sum_util, unsigned long allowed_cpu_cap) { - unsigned long freq, scale_cpu; + unsigned long freq, ref_freq, scale_cpu; struct em_perf_state *ps; int cpu;
@@ -241,10 +241,10 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd, */ cpu = cpumask_first(to_cpumask(pd->cpus)); scale_cpu = arch_scale_cpu_capacity(cpu); - ps = &pd->table[pd->nr_perf_states - 1]; + ref_freq = arch_scale_freq_ref(cpu);
max_util = min(max_util, allowed_cpu_cap); - freq = map_util_freq(max_util, ps->frequency, scale_cpu); + freq = map_util_freq(max_util, ref_freq, scale_cpu);
/* * Find the lowest performance state of the Energy Model above the diff --git a/include/linux/fs.h b/include/linux/fs.h index b641a01512fb..4cdeeaedaa40 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -456,7 +456,7 @@ extern const struct address_space_operations empty_aops; * It is also used to block modification of page cache contents through * memory mappings. * @gfp_mask: Memory allocation flags to use for allocating pages. - * @i_mmap_writable: Number of VM_SHARED mappings. + * @i_mmap_writable: Number of VM_SHARED, VM_MAYWRITE mappings. * @nr_thps: Number of THPs in the pagecache (non-shmem only). * @i_mmap: Tree of private and shared mappings. * @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable. @@ -559,7 +559,7 @@ static inline int mapping_mapped(struct address_space *mapping)
/* * Might pages of this file have been modified in userspace? - * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap + * Note that i_mmap_writable counts all VM_SHARED, VM_MAYWRITE vmas: do_mmap * marks vma as VM_SHARED if it is shared, and the file was opened for * writing i.e. vma may be mprotected writable even if now readonly. * diff --git a/include/linux/hypervisor.h b/include/linux/hypervisor.h index 9efbc54e35e5..be5417303ecf 100644 --- a/include/linux/hypervisor.h +++ b/include/linux/hypervisor.h @@ -37,6 +37,9 @@ static inline bool hypervisor_isolated_pci_functions(void) if (IS_ENABLED(CONFIG_S390)) return true;
+ if (IS_ENABLED(CONFIG_LOONGARCH)) + return true; + return jailhouse_paravirt(); }
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 430749a0f362..272d9ad73960 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -79,11 +79,6 @@ static inline struct vlan_ethhdr *skb_vlan_eth_hdr(const struct sk_buff *skb) /* found in socket.c */ extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
-static inline bool is_vlan_dev(const struct net_device *dev) -{ - return dev->priv_flags & IFF_802_1Q_VLAN; -} - #define skb_vlan_tag_present(__skb) (!!(__skb)->vlan_all) #define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci) #define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK) @@ -199,6 +194,11 @@ struct vlan_dev_priv { #endif };
+static inline bool is_vlan_dev(const struct net_device *dev) +{ + return dev->priv_flags & IFF_802_1Q_VLAN; +} + static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev) { return netdev_priv(dev); @@ -236,6 +236,11 @@ extern void vlan_vids_del_by_dev(struct net_device *dev, extern bool vlan_uses_dev(const struct net_device *dev);
#else +static inline bool is_vlan_dev(const struct net_device *dev) +{ + return false; +} + static inline struct net_device * __vlan_find_dev_deep_rcu(struct net_device *real_dev, __be16 vlan_proto, u16 vlan_id) @@ -253,19 +258,19 @@ vlan_for_each(struct net_device *dev,
static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev) { - BUG(); + WARN_ON_ONCE(1); return NULL; }
static inline u16 vlan_dev_vlan_id(const struct net_device *dev) { - BUG(); + WARN_ON_ONCE(1); return 0; }
static inline __be16 vlan_dev_vlan_proto(const struct net_device *dev) { - BUG(); + WARN_ON_ONCE(1); return 0; }
diff --git a/include/linux/iosys-map.h b/include/linux/iosys-map.h index cb71aa616bd3..631d58d0b838 100644 --- a/include/linux/iosys-map.h +++ b/include/linux/iosys-map.h @@ -264,12 +264,7 @@ static inline bool iosys_map_is_set(const struct iosys_map *map) */ static inline void iosys_map_clear(struct iosys_map *map) { - if (map->is_iomem) { - map->vaddr_iomem = NULL; - map->is_iomem = false; - } else { - map->vaddr = NULL; - } + memset(map, 0, sizeof(*map)); }
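The iosys_map_clear() rewrite above relies on vaddr and vaddr_iomem sharing one union, so a single memset() zeroes whichever member was in use together with the is_iomem flag, with no need to branch on the flag first. A tiny userspace illustration of that pattern (simplified struct, not the real iosys_map layout):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for struct iosys_map. */
struct map {
	union {
		void *vaddr;		/* system memory */
		void *vaddr_iomem;	/* I/O mapped memory */
	};
	bool is_iomem;
};

/* One memset clears the shared union storage and the flag in one go. */
static void map_clear(struct map *m)
{
	memset(m, 0, sizeof(*m));
}

int main(void)
{
	int dummy;
	struct map m;

	m.vaddr_iomem = &dummy;
	m.is_iomem = true;

	map_clear(&m);
	printf("vaddr=%p is_iomem=%d\n", m.vaddr, m.is_iomem);
	return 0;
}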
/** diff --git a/include/linux/memfd.h b/include/linux/memfd.h index e7abf6fa4c52..40cc726a8a0c 100644 --- a/include/linux/memfd.h +++ b/include/linux/memfd.h @@ -6,11 +6,25 @@
#ifdef CONFIG_MEMFD_CREATE extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned int arg); +unsigned int *memfd_file_seals_ptr(struct file *file); #else static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned int a) { return -EINVAL; } + +static inline unsigned int *memfd_file_seals_ptr(struct file *file) +{ + return NULL; +} #endif
+/* Retrieve memfd seals associated with the file, if any. */ +static inline unsigned int memfd_file_seals(struct file *file) +{ + unsigned int *sealsp = memfd_file_seals_ptr(file); + + return sealsp ? *sealsp : 0; +} + #endif /* __LINUX_MEMFD_H */ diff --git a/include/linux/mm.h b/include/linux/mm.h index ee26e37daa0a..b97d8a691b28 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -941,6 +941,17 @@ static inline bool vma_is_accessible(struct vm_area_struct *vma) return vma->vm_flags & VM_ACCESS_FLAGS; }
+static inline bool is_shared_maywrite(vm_flags_t vm_flags) +{ + return (vm_flags & (VM_SHARED | VM_MAYWRITE)) == + (VM_SHARED | VM_MAYWRITE); +} + +static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma) +{ + return is_shared_maywrite(vma->vm_flags); +} + static inline struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max) { @@ -4011,34 +4022,57 @@ void mem_dump_obj(void *object); static inline void mem_dump_obj(void *object) {} #endif
+static inline bool is_write_sealed(int seals) +{ + return seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE); +} + +/** + * is_readonly_sealed - Checks whether write-sealed but mapped read-only, + * in which case writes should be disallowing moving + * forwards. + * @seals: the seals to check + * @vm_flags: the VMA flags to check + * + * Returns whether readonly sealed, in which case writess should be disallowed + * going forward. + */ +static inline bool is_readonly_sealed(int seals, vm_flags_t vm_flags) +{ + /* + * Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as + * MAP_SHARED and read-only, take care to not allow mprotect to + * revert protections on such mappings. Do this only for shared + * mappings. For private mappings, don't need to mask + * VM_MAYWRITE as we still want them to be COW-writable. + */ + if (is_write_sealed(seals) && + ((vm_flags & (VM_SHARED | VM_WRITE)) == VM_SHARED)) + return true; + + return false; +} + /** - * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it + * seal_check_write - Check for F_SEAL_WRITE or F_SEAL_FUTURE_WRITE flags and + * handle them. * @seals: the seals to check * @vma: the vma to operate on * - * Check whether F_SEAL_FUTURE_WRITE is set; if so, do proper check/handling on - * the vma flags. Return 0 if check pass, or <0 for errors. - */ -static inline int seal_check_future_write(int seals, struct vm_area_struct *vma) -{ - if (seals & F_SEAL_FUTURE_WRITE) { - /* - * New PROT_WRITE and MAP_SHARED mmaps are not allowed when - * "future write" seal active. - */ - if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE)) - return -EPERM; - - /* - * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as - * MAP_SHARED and read-only, take care to not allow mprotect to - * revert protections on such mappings. Do this only for shared - * mappings. For private mappings, don't need to mask - * VM_MAYWRITE as we still want them to be COW-writable. - */ - if (vma->vm_flags & VM_SHARED) - vm_flags_clear(vma, VM_MAYWRITE); - } + * Check whether F_SEAL_WRITE or F_SEAL_FUTURE_WRITE are set; if so, do proper + * check/handling on the vma flags. Return 0 if check pass, or <0 for errors. + */ +static inline int seal_check_write(int seals, struct vm_area_struct *vma) +{ + if (!is_write_sealed(seals)) + return 0; + + /* + * New PROT_WRITE and MAP_SHARED mmaps are not allowed when + * write seals are active. + */ + if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE)) + return -EPERM;
return 0; } diff --git a/include/linux/pci.h b/include/linux/pci.h index 2d1fb935a8c8..ac5bd1718af2 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -318,7 +318,14 @@ struct pci_sriov; struct pci_p2pdma; struct rcec_ea;
-/* The pci_dev structure describes PCI devices */ +/* struct pci_dev - describes a PCI device + * + * @is_hotplug_bridge: Hotplug bridge of any kind (e.g. PCIe Hot-Plug Capable, + * Conventional PCI Hot-Plug, ACPI slot). + * Such bridges are allocated additional MMIO and bus + * number resources to allow for hierarchy expansion. + * @is_pciehp: PCIe Hot-Plug Capable bridge. + */ struct pci_dev { struct list_head bus_list; /* Node in per-bus list */ struct pci_bus *bus; /* Bus this device is on */ @@ -439,6 +446,7 @@ struct pci_dev { unsigned int is_physfn:1; unsigned int is_virtfn:1; unsigned int is_hotplug_bridge:1; + unsigned int is_pciehp:1; unsigned int shpc_managed:1; /* SHPC owned by shpchp */ unsigned int is_thunderbolt:1; /* Thunderbolt controller */ /* diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 406855d73901..4b74f0f012a5 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -73,7 +73,8 @@ extern int pm_runtime_force_resume(struct device *dev); extern int __pm_runtime_idle(struct device *dev, int rpmflags); extern int __pm_runtime_suspend(struct device *dev, int rpmflags); extern int __pm_runtime_resume(struct device *dev, int rpmflags); -extern int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count); +extern int pm_runtime_get_if_active(struct device *dev); +extern int pm_runtime_get_if_in_use(struct device *dev); extern int pm_schedule_suspend(struct device *dev, unsigned int delay); extern int __pm_runtime_set_status(struct device *dev, unsigned int status); extern int pm_runtime_barrier(struct device *dev); @@ -95,18 +96,6 @@ extern void pm_runtime_release_supplier(struct device_link *link);
extern int devm_pm_runtime_enable(struct device *dev);
-/** - * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter. - * @dev: Target device. - * - * Increment the runtime PM usage counter of @dev if its runtime PM status is - * %RPM_ACTIVE and its runtime PM usage counter is greater than 0. - */ -static inline int pm_runtime_get_if_in_use(struct device *dev) -{ - return pm_runtime_get_if_active(dev, false); -} - /** * pm_suspend_ignore_children - Set runtime PM behavior regarding children. * @dev: Target device. @@ -277,8 +266,7 @@ static inline int pm_runtime_get_if_in_use(struct device *dev) { return -EINVAL; } -static inline int pm_runtime_get_if_active(struct device *dev, - bool ign_usage_count) +static inline int pm_runtime_get_if_active(struct device *dev) { return -EINVAL; } diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index 67b573d5bf28..9671b7234684 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -275,6 +275,14 @@ void arch_update_thermal_pressure(const struct cpumask *cpus, { } #endif
+#ifndef arch_scale_freq_ref +static __always_inline +unsigned int arch_scale_freq_ref(int cpu) +{ + return 0; +} +#endif + static inline int task_node(const struct task_struct *p) { return cpu_to_node(task_cpu(p)); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 7b7222b4f611..3a558a3c2cca 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3556,7 +3556,13 @@ static inline void *skb_frag_address(const skb_frag_t *frag) */ static inline void *skb_frag_address_safe(const skb_frag_t *frag) { - void *ptr = page_address(skb_frag_page(frag)); + struct page *page = skb_frag_page(frag); + void *ptr; + + if (!page) + return NULL; + + ptr = page_address(page); if (unlikely(!ptr)) return NULL;
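The skb_frag_address_safe() change above adds one more step to the defensive pattern the function already used: check each pointer before the next dereference and return NULL as soon as anything is missing, so callers only have a single failure value to test. A compact sketch of that accessor style (plain structs standing in for skb fragments):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for a fragment that may or may not own a page. */
struct page { char *data; };
struct frag { struct page *page; size_t offset; };

/*
 * Return the usable address of a fragment, or NULL if either the backing
 * page or its mapping is absent; never dereference before checking.
 */
static char *frag_address_safe(const struct frag *f)
{
	struct page *page = f->page;

	if (!page)
		return NULL;
	if (!page->data)
		return NULL;
	return page->data + f->offset;
}

int main(void)
{
	struct frag no_page = { .page = NULL };
	char buf[16] = "payload";
	struct page p = { .data = buf };
	struct frag ok = { .page = &p, .offset = 0 };

	printf("missing page -> %p\n", (void *)frag_address_safe(&no_page));
	printf("valid frag   -> %s\n", frag_address_safe(&ok));
	return 0;
}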
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h index 2d207cb4837d..4ac082a63173 100644 --- a/include/linux/usb/cdc_ncm.h +++ b/include/linux/usb/cdc_ncm.h @@ -119,6 +119,7 @@ struct cdc_ncm_ctx { u32 timer_interval; u32 max_ndp_size; u8 is_ndp16; + u8 filtering_supported; union { struct usb_cdc_ncm_ndp16 *delayed_ndp16; struct usb_cdc_ncm_ndp32 *delayed_ndp32; diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h index fbf30721bac9..5148b035a8f3 100644 --- a/include/linux/virtio_vsock.h +++ b/include/linux/virtio_vsock.h @@ -110,7 +110,12 @@ static inline size_t virtio_vsock_skb_len(struct sk_buff *skb) return (size_t)(skb_end_pointer(skb) - skb->head); }
-#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE (1024 * 4) +/* Dimension the RX SKB so that the entire thing fits exactly into + * a single 4KiB page. This avoids wasting memory due to alloc_skb() + * rounding up to the next page order and also means that we + * don't leave higher-order pages sitting around in the RX queue. + */ +#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE SKB_WITH_OVERHEAD(1024 * 4) #define VIRTIO_VSOCK_MAX_BUF_SIZE 0xFFFFFFFFUL #define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE (1024 * 64)
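The new VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE above leans on SKB_WITH_OVERHEAD(), which subtracts the aligned size of the skb's trailing metadata from the requested size, so payload plus metadata together come out to exactly 4 KiB and alloc_skb() does not round up to a higher page order. The arithmetic in isolation, with a stand-in metadata struct since the real skb_shared_info size depends on the kernel configuration:

#include <stdio.h>

/* Stand-in for struct skb_shared_info; the real size is config dependent. */
struct shared_info_stub {
	unsigned char data[320];
};

#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define CACHELINE		64
/* Same idea as SKB_WITH_OVERHEAD(): usable bytes once metadata is reserved. */
#define WITH_OVERHEAD(x)	((x) - ALIGN_UP(sizeof(struct shared_info_stub), CACHELINE))

int main(void)
{
	unsigned long page = 4096;
	unsigned long buf = WITH_OVERHEAD(page);

	/* buf bytes of payload plus aligned metadata fill the page exactly. */
	printf("payload %lu + metadata %lu = %lu\n",
	       buf, page - buf, buf + (page - buf));
	return 0;
}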
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h index c5e57c6bd873..078e16d2512a 100644 --- a/include/net/bond_3ad.h +++ b/include/net/bond_3ad.h @@ -54,6 +54,8 @@ typedef enum { AD_MUX_DETACHED, /* mux machine */ AD_MUX_WAITING, /* mux machine */ AD_MUX_ATTACHED, /* mux machine */ + AD_MUX_COLLECTING, /* mux machine */ + AD_MUX_DISTRIBUTING, /* mux machine */ AD_MUX_COLLECTING_DISTRIBUTING /* mux machine */ } mux_states_t;
@@ -302,6 +304,7 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave); int bond_3ad_set_carrier(struct bonding *bond); void bond_3ad_update_lacp_rate(struct bonding *bond); +void bond_3ad_update_lacp_active(struct bonding *bond); void bond_3ad_update_ad_actor_settings(struct bonding *bond); int bond_3ad_stats_fill(struct sk_buff *skb, struct bond_3ad_stats *stats); size_t bond_3ad_stats_size(void); diff --git a/include/net/bond_options.h b/include/net/bond_options.h index f631d9f09941..18687ccf0638 100644 --- a/include/net/bond_options.h +++ b/include/net/bond_options.h @@ -76,6 +76,7 @@ enum { BOND_OPT_MISSED_MAX, BOND_OPT_NS_TARGETS, BOND_OPT_PRIO, + BOND_OPT_COUPLED_CONTROL, BOND_OPT_LAST };
diff --git a/include/net/bonding.h b/include/net/bonding.h index 94594026a5c5..8bb5f016969f 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -148,6 +148,7 @@ struct bond_params { #if IS_ENABLED(CONFIG_IPV6) struct in6_addr ns_targets[BOND_MAX_NS_TARGETS]; #endif + int coupled_control;
/* 2 bytes of padding : see ether_addr_equal_64bits() */ u8 ad_actor_system[ETH_ALEN + 2]; @@ -167,6 +168,7 @@ struct slave { u8 backup:1, /* indicates backup slave. Value corresponds with BOND_STATE_ACTIVE and BOND_STATE_BACKUP */ inactive:1, /* indicates inactive slave */ + rx_disabled:1, /* indicates whether slave's Rx is disabled */ should_notify:1, /* indicates whether the state changed */ should_notify_link:1; /* indicates whether the link changed */ u8 duplex; @@ -568,6 +570,14 @@ static inline void bond_set_slave_inactive_flags(struct slave *slave, bond_set_slave_state(slave, BOND_STATE_BACKUP, notify); if (!slave->bond->params.all_slaves_active) slave->inactive = 1; + if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) + slave->rx_disabled = 1; +} + +static inline void bond_set_slave_tx_disabled_flags(struct slave *slave, + bool notify) +{ + bond_set_slave_state(slave, BOND_STATE_BACKUP, notify); }
static inline void bond_set_slave_active_flags(struct slave *slave, @@ -575,6 +585,14 @@ static inline void bond_set_slave_active_flags(struct slave *slave, { bond_set_slave_state(slave, BOND_STATE_ACTIVE, notify); slave->inactive = 0; + if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) + slave->rx_disabled = 0; +} + +static inline void bond_set_slave_rx_enabled_flags(struct slave *slave, + bool notify) +{ + slave->rx_disabled = 0; }
static inline bool bond_is_slave_inactive(struct slave *slave) @@ -582,6 +600,11 @@ static inline bool bond_is_slave_inactive(struct slave *slave) return slave->inactive; }
+static inline bool bond_is_slave_rx_disabled(struct slave *slave) +{ + return slave->rx_disabled; +} + static inline void bond_propose_link_state(struct slave *slave, int state) { slave->link_new_state = state; diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 2fb3151ea7c9..5b3a63c377d6 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -559,7 +559,7 @@ ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband, { int i;
- if (WARN_ON(iftype >= NL80211_IFTYPE_MAX)) + if (WARN_ON(iftype >= NUM_NL80211_IFTYPES)) return NULL;
if (iftype == NL80211_IFTYPE_AP_VLAN) diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 835a58ce9ca5..adaa1b2323d2 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -4111,6 +4111,8 @@ struct ieee80211_prep_tx_info { * @mgd_complete_tx: Notify the driver that the response frame for a previously * transmitted frame announced with @mgd_prepare_tx was received, the data * is filled similarly to @mgd_prepare_tx though the duration is not used. + * Note that this isn't always called for each mgd_prepare_tx() call, for + * example for SAE the 'confirm' messages can be on the air in any order. * * @mgd_protect_tdls_discover: Protect a TDLS discovery session. After sending * a TDLS discovery-request, we expect a reply to arrive on the AP's diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 0d28172193fa..d775906a65c7 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -180,6 +180,7 @@ struct pneigh_entry { netdevice_tracker dev_tracker; u32 flags; u8 protocol; + bool permanent; u32 key[]; };
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index ce3f84c6eb8e..5ddcadee62b7 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -293,6 +293,7 @@ static inline int check_net(const struct net *net) }
void net_drop_ns(void *); +void net_passive_dec(struct net *net);
#else
@@ -322,8 +323,23 @@ static inline int check_net(const struct net *net) }
#define net_drop_ns NULL + +static inline void net_passive_dec(struct net *net) +{ + refcount_dec(&net->passive); +} #endif
+static inline void net_passive_inc(struct net *net) +{ + refcount_inc(&net->passive); +} + +/* Returns true if the netns initialization is completed successfully */ +static inline bool net_initialized(const struct net *net) +{ + return READ_ONCE(net->list.next); +}
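net_passive_inc()/net_passive_dec() above expose the existing "passive" reference count of a struct net, which keeps the namespace's memory allocated after its last full reference is gone; kernel sockets that do not hold a full netns reference can pin the memory this way instead (the net/core/sock.c hunk further down uses exactly that). A toy model of the two-level refcount, with names chosen for the example only:

#include <stdio.h>
#include <stdlib.h>

/* Toy namespace object with a "full" count and a "passive" (memory) count. */
struct ns {
	int count;	/* live users: keeps the namespace operational */
	int passive;	/* memory pins: keeps the struct itself allocated */
	char name[16];
};

static struct ns *ns_create(void)
{
	struct ns *n = calloc(1, sizeof(*n));

	n->count = 1;
	n->passive = 1;	/* the full reference implies one passive reference */
	return n;
}

static void passive_dec(struct ns *n)
{
	if (--n->passive == 0) {
		printf("freeing %s\n", n->name);
		free(n);
	}
}

static void ns_put(struct ns *n)
{
	if (--n->count == 0) {
		printf("tearing down %s\n", n->name);
		passive_dec(n);	/* drop the passive ref held by the full ref */
	}
}

int main(void)
{
	struct ns *n = ns_create();

	snprintf(n->name, sizeof(n->name), "netns0");
	n->passive++;		/* e.g. a kernel socket pinning the memory */
	ns_put(n);		/* namespace torn down, memory still valid  */
	passive_dec(n);		/* last passive ref gone, memory released   */
	return 0;
}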
static inline void __netns_tracker_alloc(struct net *net, netns_tracker *tracker, diff --git a/include/net/sock.h b/include/net/sock.h index e15bea43b2ec..b5f7208a9ec3 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1859,6 +1859,7 @@ static inline bool sock_allow_reclassification(const struct sock *csk) struct sock *sk_alloc(struct net *net, int family, gfp_t priority, struct proto *prot, int kern); void sk_free(struct sock *sk); +void sk_net_refcnt_upgrade(struct sock *sk); void sk_destruct(struct sock *sk); struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority); void sk_free_unlock_clone(struct sock *sk); diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 8ea1674069fe..f759109caeea 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -1857,7 +1857,7 @@ TRACE_EVENT(qgroup_update_counters,
TRACE_EVENT(qgroup_update_reserve,
- TP_PROTO(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup, + TP_PROTO(const struct btrfs_fs_info *fs_info, const struct btrfs_qgroup *qgroup, s64 diff, int type),
TP_ARGS(fs_info, qgroup, diff, type), @@ -1883,7 +1883,7 @@ TRACE_EVENT(qgroup_update_reserve,
TRACE_EVENT(qgroup_meta_reserve,
- TP_PROTO(struct btrfs_root *root, s64 diff, int type), + TP_PROTO(const struct btrfs_root *root, s64 diff, int type),
TP_ARGS(root, diff, type),
@@ -1906,7 +1906,7 @@ TRACE_EVENT(qgroup_meta_reserve,
TRACE_EVENT(qgroup_meta_convert,
- TP_PROTO(struct btrfs_root *root, s64 diff), + TP_PROTO(const struct btrfs_root *root, s64 diff),
TP_ARGS(root, diff),
diff --git a/include/trace/events/thp.h b/include/trace/events/thp.h index f50048af5fcc..c8fe879d5828 100644 --- a/include/trace/events/thp.h +++ b/include/trace/events/thp.h @@ -8,6 +8,7 @@ #include <linux/types.h> #include <linux/tracepoint.h>
+#ifdef CONFIG_PPC_BOOK3S_64 DECLARE_EVENT_CLASS(hugepage_set,
TP_PROTO(unsigned long addr, unsigned long pte), @@ -66,6 +67,7 @@ DEFINE_EVENT(hugepage_update, hugepage_update_pud, TP_PROTO(unsigned long addr, unsigned long pud, unsigned long clr, unsigned long set), TP_ARGS(addr, pud, clr, set) ); +#endif /* CONFIG_PPC_BOOK3S_64 */
DECLARE_EVENT_CLASS(migration_pmd,
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index ce3117df9cec..6750911da4f0 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -950,6 +950,7 @@ enum { IFLA_BOND_AD_LACP_ACTIVE, IFLA_BOND_MISSED_MAX, IFLA_BOND_NS_IP6_TARGET, + IFLA_BOND_COUPLED_CONTROL, __IFLA_BOND_MAX, };
diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h index ff8d21f9e95b..5a47339ef7d7 100644 --- a/include/uapi/linux/in6.h +++ b/include/uapi/linux/in6.h @@ -152,7 +152,6 @@ struct in6_flowlabel_req { /* * IPV6 socket options */ -#if __UAPI_DEF_IPV6_OPTIONS #define IPV6_ADDRFORM 1 #define IPV6_2292PKTINFO 2 #define IPV6_2292HOPOPTS 3 @@ -169,8 +168,10 @@ struct in6_flowlabel_req { #define IPV6_MULTICAST_IF 17 #define IPV6_MULTICAST_HOPS 18 #define IPV6_MULTICAST_LOOP 19 +#if __UAPI_DEF_IPV6_OPTIONS #define IPV6_ADD_MEMBERSHIP 20 #define IPV6_DROP_MEMBERSHIP 21 +#endif #define IPV6_ROUTER_ALERT 22 #define IPV6_MTU_DISCOVER 23 #define IPV6_MTU 24 @@ -203,7 +204,6 @@ struct in6_flowlabel_req { #define IPV6_IPSEC_POLICY 34 #define IPV6_XFRM_POLICY 35 #define IPV6_HDRINCL 36 -#endif
/* * Multicast: diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 8e61f8b7c2ce..333769bc6abf 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -46,7 +46,7 @@ struct io_uring_sqe { }; __u32 len; /* buffer size or number of iovecs */ union { - __kernel_rwf_t rw_flags; + __u32 rw_flags; __u32 fsync_flags; __u16 poll_events; /* compatibility */ __u32 poll32_events; /* word-reversed for BE */ diff --git a/include/uapi/linux/pfrut.h b/include/uapi/linux/pfrut.h index 42fa15f8310d..b77d5c210c26 100644 --- a/include/uapi/linux/pfrut.h +++ b/include/uapi/linux/pfrut.h @@ -89,6 +89,7 @@ struct pfru_payload_hdr { __u32 hw_ver; __u32 rt_ver; __u8 platform_id[16]; + __u32 svn_ver; };
enum pfru_dsm_status { diff --git a/io_uring/net.c b/io_uring/net.c index e455f051e62e..e7f8a79e049c 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -351,6 +351,13 @@ static int io_setup_async_addr(struct io_kiocb *req, return -EAGAIN; }
+static void io_net_kbuf_recyle(struct io_kiocb *req) +{ + req->flags |= REQ_F_PARTIAL_IO; + if (req->flags & REQ_F_BUFFER_RING) + io_kbuf_recycle_ring(req); +} + int io_sendmsg_prep_async(struct io_kiocb *req) { int ret; @@ -442,7 +449,7 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags) kmsg->msg.msg_controllen = 0; kmsg->msg.msg_control = NULL; sr->done_io += ret; - req->flags |= REQ_F_PARTIAL_IO; + io_net_kbuf_recyle(req); return io_setup_async_msg(req, kmsg, issue_flags); } if (ret == -ERESTARTSYS) @@ -521,7 +528,7 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags) sr->len -= ret; sr->buf += ret; sr->done_io += ret; - req->flags |= REQ_F_PARTIAL_IO; + io_net_kbuf_recyle(req); return io_setup_async_addr(req, &__address, issue_flags); } if (ret == -ERESTARTSYS) @@ -891,7 +898,7 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags) } if (ret > 0 && io_net_retry(sock, flags)) { sr->done_io += ret; - req->flags |= REQ_F_PARTIAL_IO; + io_net_kbuf_recyle(req); return io_setup_async_msg(req, kmsg, issue_flags); } if (ret == -ERESTARTSYS) @@ -991,7 +998,7 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags) sr->len -= ret; sr->buf += ret; sr->done_io += ret; - req->flags |= REQ_F_PARTIAL_IO; + io_net_kbuf_recyle(req); return -EAGAIN; } if (ret == -ERESTARTSYS) @@ -1235,7 +1242,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags) zc->len -= ret; zc->buf += ret; zc->done_io += ret; - req->flags |= REQ_F_PARTIAL_IO; + io_net_kbuf_recyle(req); return io_setup_async_addr(req, &__address, issue_flags); } if (ret == -ERESTARTSYS) @@ -1306,7 +1313,7 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
if (ret > 0 && io_net_retry(sock, flags)) { sr->done_io += ret; - req->flags |= REQ_F_PARTIAL_IO; + io_net_kbuf_recyle(req); return io_setup_async_msg(req, kmsg, issue_flags); } if (ret == -ERESTARTSYS) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 1f9ae600e445..7d6ee41f4b4f 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -460,7 +460,8 @@ static bool reg_not_null(const struct bpf_reg_state *reg) type == PTR_TO_MAP_KEY || type == PTR_TO_SOCK_COMMON || (type == PTR_TO_BTF_ID && is_trusted_reg(reg)) || - type == PTR_TO_MEM; + type == PTR_TO_MEM || + type == CONST_PTR_TO_MAP; }
static bool type_is_ptr_alloc_obj(u32 type) diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index ad8b62202bdc..eadb028916c8 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -432,7 +432,7 @@ static inline void check_insane_mems_config(nodemask_t *nodes) { if (!cpusets_insane_config() && movable_only_nodes(nodes)) { - static_branch_enable(&cpusets_insane_config_key); + static_branch_enable_cpuslocked(&cpusets_insane_config_key); pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n" "Cpuset allocations might fail even with a lot of memory available.\n", nodemask_pr_args(nodes)); diff --git a/kernel/fork.c b/kernel/fork.c index 7966c9a1c163..0e20d7e94608 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -739,7 +739,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
get_file(file); i_mmap_lock_write(mapping); - if (tmp->vm_flags & VM_SHARED) + if (vma_is_shared_maywrite(tmp)) mapping_allow_writable(mapping); flush_dcache_mmap_lock(mapping); /* insert tmp into the share list, just after mpnt */ diff --git a/kernel/module/main.c b/kernel/module/main.c index 9711ad14825b..627680e568fc 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -701,14 +701,16 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user, struct module *mod; char name[MODULE_NAME_LEN]; char buf[MODULE_FLAGS_BUF_SIZE]; - int ret, forced = 0; + int ret, len, forced = 0;
if (!capable(CAP_SYS_MODULE) || modules_disabled) return -EPERM;
- if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0) - return -EFAULT; - name[MODULE_NAME_LEN-1] = '\0'; + len = strncpy_from_user(name, name_user, MODULE_NAME_LEN); + if (len == 0 || len == MODULE_NAME_LEN) + return -ENOENT; + if (len < 0) + return len;
audit_log_kern_module(name);
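The delete_module() change above keys off the return-value contract of strncpy_from_user(): it returns the string length excluding the terminator, a negative errno on fault, and exactly the passed-in count when the source did not fit (in which case the destination is not NUL-terminated). So a return of 0 means an empty name and a return equal to MODULE_NAME_LEN means the name is too long; both now map to -ENOENT instead of being silently truncated. A userspace sketch of the same checks around a bounded copy (the helper only mimics that contract and never faults):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define NAME_LEN 8	/* stand-in for MODULE_NAME_LEN */

/*
 * Mimic the contract: copy at most count bytes, return the string length
 * if it fit, or count (with no terminator written) if it was truncated.
 */
static long bounded_copy(char *dst, const char *src, long count)
{
	long len = strnlen(src, count);

	memcpy(dst, src, len);
	if (len < count)
		dst[len] = '\0';	/* only terminated when it fits */
	return len;
}

static int parse_name(const char *user, char *name)
{
	long len = bounded_copy(name, user, NAME_LEN);

	if (len == 0 || len == NAME_LEN)
		return -ENOENT;	/* empty, or too long to be a real module */
	if (len < 0)		/* kept to mirror the kernel flow */
		return (int)len;
	return 0;
}

int main(void)
{
	char name[NAME_LEN];

	printf("\"\"               -> %d\n", parse_name("", name));
	printf("\"loop\"           -> %d\n", parse_name("loop", name));
	printf("\"waytoolongname\" -> %d\n", parse_name("waytoolongname", name));
	return 0;
}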
diff --git a/kernel/power/console.c b/kernel/power/console.c index fcdf0e14a47d..19c48aa5355d 100644 --- a/kernel/power/console.c +++ b/kernel/power/console.c @@ -16,6 +16,7 @@ #define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1)
static int orig_fgconsole, orig_kmsg; +static bool vt_switch_done;
static DEFINE_MUTEX(vt_switch_mutex);
@@ -136,17 +137,21 @@ void pm_prepare_console(void) if (orig_fgconsole < 0) return;
+ vt_switch_done = true; + orig_kmsg = vt_kmsg_redirect(SUSPEND_CONSOLE); return; }
void pm_restore_console(void) { - if (!pm_vt_switch()) + if (!pm_vt_switch() && !vt_switch_done) return;
if (orig_fgconsole >= 0) { vt_move_to_console(orig_fgconsole, 0); vt_kmsg_redirect(orig_kmsg); } + + vt_switch_done = false; } diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 536acebf22b0..607b2e68fa4c 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -4427,6 +4427,8 @@ int rcutree_prepare_cpu(unsigned int cpu) rdp->rcu_iw_gp_seq = rdp->gp_seq - 1; trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl")); raw_spin_unlock_irqrestore_rcu_node(rnp, flags); + + rcu_preempt_deferred_qs_init(rdp); rcu_spawn_one_boost_kthread(rnp); rcu_spawn_cpu_nocb_kthread(cpu); WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1); diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index ac8cc756920d..71403d22a846 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -175,6 +175,17 @@ struct rcu_snap_record { unsigned long jiffies; /* Track jiffies value */ };
+/* + * An IRQ work (deferred_qs_iw) is used by RCU to get the scheduler's attention. + * to report quiescent states at the soonest possible time. + * The request can be in one of the following states: + * - DEFER_QS_IDLE: An IRQ work is yet to be scheduled. + * - DEFER_QS_PENDING: An IRQ work was scheduled but either not yet run, or it + * ran and we still haven't reported a quiescent state. + */ +#define DEFER_QS_IDLE 0 +#define DEFER_QS_PENDING 1 + /* Per-CPU data for read-copy update. */ struct rcu_data { /* 1) quiescent-state and grace-period handling : */ @@ -192,7 +203,7 @@ struct rcu_data { /* during and after the last grace */ /* period it is aware of. */ struct irq_work defer_qs_iw; /* Obtain later scheduler attention. */ - bool defer_qs_iw_pending; /* Scheduler attention pending? */ + int defer_qs_iw_pending; /* Scheduler attention pending? */ struct work_struct strict_work; /* Schedule readers for strict GPs. */
/* 2) batch handling */ @@ -452,6 +463,7 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp); static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); static void rcu_flavor_sched_clock_irq(int user); static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck); +static void rcu_preempt_deferred_qs_init(struct rcu_data *rdp); static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); static bool rcu_is_callbacks_kthread(struct rcu_data *rdp); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 94b715139f52..8707f155afb6 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -474,13 +474,16 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags) struct rcu_node *rnp; union rcu_special special;
+ rdp = this_cpu_ptr(&rcu_data); + if (rdp->defer_qs_iw_pending == DEFER_QS_PENDING) + rdp->defer_qs_iw_pending = DEFER_QS_IDLE; + /* * If RCU core is waiting for this CPU to exit its critical section, * report the fact that it has exited. Because irqs are disabled, * t->rcu_read_unlock_special cannot change. */ special = t->rcu_read_unlock_special; - rdp = this_cpu_ptr(&rcu_data); if (!special.s && !rdp->cpu_no_qs.b.exp) { local_irq_restore(flags); return; @@ -612,10 +615,29 @@ notrace void rcu_preempt_deferred_qs(struct task_struct *t) */ static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp) { + unsigned long flags; struct rcu_data *rdp;
rdp = container_of(iwp, struct rcu_data, defer_qs_iw); - rdp->defer_qs_iw_pending = false; + local_irq_save(flags); + + /* + * If the IRQ work handler happens to run in the middle of RCU read-side + * critical section, it could be ineffective in getting the scheduler's + * attention to report a deferred quiescent state (the whole point of the + * IRQ work). For this reason, requeue the IRQ work. + * + * Basically, we want to avoid following situation: + * 1. rcu_read_unlock() queues IRQ work (state -> DEFER_QS_PENDING) + * 2. CPU enters new rcu_read_lock() + * 3. IRQ work runs but cannot report QS due to rcu_preempt_depth() > 0 + * 4. rcu_read_unlock() does not re-queue work (state still PENDING) + * 5. Deferred QS reporting does not happen. + */ + if (rcu_preempt_depth() > 0) + WRITE_ONCE(rdp->defer_qs_iw_pending, DEFER_QS_IDLE); + + local_irq_restore(flags); }
/* @@ -661,17 +683,11 @@ static void rcu_read_unlock_special(struct task_struct *t) set_tsk_need_resched(current); set_preempt_need_resched(); if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled && - expboost && !rdp->defer_qs_iw_pending && cpu_online(rdp->cpu)) { + expboost && rdp->defer_qs_iw_pending != DEFER_QS_PENDING && + cpu_online(rdp->cpu)) { // Get scheduler to re-evaluate and call hooks. // If !IRQ_WORK, FQS scan will eventually IPI. - if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) && - IS_ENABLED(CONFIG_PREEMPT_RT)) - rdp->defer_qs_iw = IRQ_WORK_INIT_HARD( - rcu_preempt_deferred_qs_handler); - else - init_irq_work(&rdp->defer_qs_iw, - rcu_preempt_deferred_qs_handler); - rdp->defer_qs_iw_pending = true; + rdp->defer_qs_iw_pending = DEFER_QS_PENDING; irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu); } } @@ -810,6 +826,10 @@ dump_blkd_tasks(struct rcu_node *rnp, int ncheck) } }
+static void rcu_preempt_deferred_qs_init(struct rcu_data *rdp) +{ + rdp->defer_qs_iw = IRQ_WORK_INIT_HARD(rcu_preempt_deferred_qs_handler); +} #else /* #ifdef CONFIG_PREEMPT_RCU */
/* @@ -1009,6 +1029,8 @@ dump_blkd_tasks(struct rcu_node *rnp, int ncheck) WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks)); }
+static void rcu_preempt_deferred_qs_init(struct rcu_data *rdp) { } + #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
/* diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 776be0549162..819ec1ccc08c 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -137,6 +137,32 @@ static void sugov_deferred_update(struct sugov_policy *sg_policy) } }
+/** + * get_capacity_ref_freq - get the reference frequency that has been used to + * correlate frequency and compute capacity for a given cpufreq policy. We use + * the CPU managing it for the arch_scale_freq_ref() call in the function. + * @policy: the cpufreq policy of the CPU in question. + * + * Return: the reference CPU frequency to compute a capacity. + */ +static __always_inline +unsigned long get_capacity_ref_freq(struct cpufreq_policy *policy) +{ + unsigned int freq = arch_scale_freq_ref(policy->cpu); + + if (freq) + return freq; + + if (arch_scale_freq_invariant()) + return policy->cpuinfo.max_freq; + + /* + * Apply a 25% margin so that we select a higher frequency than + * the current one before the CPU is fully busy: + */ + return policy->cur + (policy->cur >> 2); +} + /** * get_next_freq - Compute a new frequency for a given cpufreq policy. * @sg_policy: schedutil policy object to compute the new frequency for. @@ -163,9 +189,9 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy, unsigned long util, unsigned long max) { struct cpufreq_policy *policy = sg_policy->policy; - unsigned int freq = arch_scale_freq_invariant() ? - policy->cpuinfo.max_freq : policy->cur; + unsigned int freq;
+ freq = get_capacity_ref_freq(policy); freq = map_util_freq(util, freq, max);
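get_capacity_ref_freq() above picks the frequency that utilization is scaled against: the architecture's reference frequency when one is registered, the policy's maximum when frequency invariance is available, and otherwise the current frequency plus 25% headroom, since cur + (cur >> 2) is cur * 1.25 in integer arithmetic. The fallback chain in isolation (the constants are made up for the demonstration):

#include <stdio.h>

struct policy {
	unsigned int cur;		/* current frequency in kHz */
	unsigned int cpuinfo_max_freq;	/* hardware maximum in kHz  */
};

/* Pretend knobs; in the kernel these come from the architecture code. */
static unsigned int arch_ref_freq;	/* 0 means "not provided" */
static int freq_invariant;

static unsigned long capacity_ref_freq(const struct policy *p)
{
	if (arch_ref_freq)
		return arch_ref_freq;
	if (freq_invariant)
		return p->cpuinfo_max_freq;
	/* 25% margin: pick a target above the current frequency. */
	return p->cur + (p->cur >> 2);
}

int main(void)
{
	struct policy p = { .cur = 2000000, .cpuinfo_max_freq = 3000000 };

	printf("no ref, not invariant: %lu kHz\n", capacity_ref_freq(&p));
	freq_invariant = 1;
	printf("invariant:             %lu kHz\n", capacity_ref_freq(&p));
	arch_ref_freq = 2400000;
	printf("arch reference:        %lu kHz\n", capacity_ref_freq(&p));
	return 0;
}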
if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 6ce3028e6e85..1cf43e91ae9d 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -11697,8 +11697,14 @@ static inline bool update_newidle_cost(struct sched_domain *sd, u64 cost) /* * Track max cost of a domain to make sure to not delay the * next wakeup on the CPU. + * + * sched_balance_newidle() bumps the cost whenever newidle + * balance fails, and we don't want things to grow out of + * control. Use the sysctl_sched_migration_cost as the upper + * limit, plus a little extra to avoid off by ones. */ - sd->max_newidle_lb_cost = cost; + sd->max_newidle_lb_cost = + min(cost, sysctl_sched_migration_cost + 200); sd->last_decay_max_lb_cost = jiffies; } else if (time_after(jiffies, sd->last_decay_max_lb_cost + HZ)) { /* @@ -12384,10 +12390,17 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
t1 = sched_clock_cpu(this_cpu); domain_cost = t1 - t0; - update_newidle_cost(sd, domain_cost); - curr_cost += domain_cost; t0 = t1; + + /* + * Failing newidle means it is not effective; + * bump the cost so we end up doing less of it. + */ + if (!pulled_task) + domain_cost = (3 * sd->max_newidle_lb_cost) / 2; + + update_newidle_cost(sd, domain_cost); }
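The fair.c hunks above make a failed newidle balance more expensive to retry: on failure the cost fed back is 1.5x the domain's current maximum, and update_newidle_cost() now clamps that maximum at sysctl_sched_migration_cost plus a small slack, so the back-off grows geometrically but cannot run away. A few iterations of that arithmetic, assuming the usual default migration cost of 500000 ns:

#include <stdio.h>

#define MIGRATION_COST_NS	500000UL	/* assumed sysctl_sched_migration_cost */

static unsigned long max_cost;	/* plays the role of sd->max_newidle_lb_cost */

/* Same shape as update_newidle_cost() for the "cost > max" case. */
static void update_cost(unsigned long cost)
{
	if (cost > max_cost)
		max_cost = cost < MIGRATION_COST_NS + 200 ?
			   cost : MIGRATION_COST_NS + 200;
}

int main(void)
{
	int i;

	max_cost = 10000;	/* some measured balance cost, in ns */
	for (i = 0; i < 12; i++) {
		/* a failed newidle balance feeds back 1.5x the current max */
		update_cost(3 * max_cost / 2);
		printf("after failure %2d: max_newidle_lb_cost = %lu ns\n",
		       i + 1, max_cost);
	}
	return 0;
}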
/* diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 4351b9069a91..15785a729a0c 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -4058,13 +4058,17 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag, } else { iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash); } + } else { + if (hash) + iter->hash = alloc_and_copy_ftrace_hash(hash->size_bits, hash); + else + iter->hash = EMPTY_HASH; + }
- if (!iter->hash) { - trace_parser_put(&iter->parser); - goto out_unlock; - } - } else - iter->hash = hash; + if (!iter->hash) { + trace_parser_put(&iter->parser); + goto out_unlock; + }
ret = 0;
@@ -5922,9 +5926,6 @@ int ftrace_regex_release(struct inode *inode, struct file *file) ftrace_hash_move_and_update_ops(iter->ops, orig_hash, iter->hash, filter_hash); mutex_unlock(&ftrace_lock); - } else { - /* For read only, the hash is the ops hash */ - iter->hash = NULL; }
mutex_unlock(&iter->ops->func_hash->regex_lock); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 946350c98b53..907e45361939 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1661,7 +1661,7 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
ret = get_user(ch, ubuf++); if (ret) - goto out; + goto fail;
read++; cnt--; @@ -1675,7 +1675,7 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf, while (cnt && isspace(ch)) { ret = get_user(ch, ubuf++); if (ret) - goto out; + goto fail; read++; cnt--; } @@ -1685,8 +1685,7 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf, /* only spaces were written */ if (isspace(ch) || !ch) { *ppos += read; - ret = read; - goto out; + return read; } }
@@ -1696,11 +1695,12 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf, parser->buffer[parser->idx++] = ch; else { ret = -EINVAL; - goto out; + goto fail; } + ret = get_user(ch, ubuf++); if (ret) - goto out; + goto fail; read++; cnt--; } @@ -1716,13 +1716,13 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf, parser->buffer[parser->idx] = 0; } else { ret = -EINVAL; - goto out; + goto fail; }
*ppos += read; - ret = read; - -out: + return read; +fail: + trace_parser_fail(parser); return ret; }
@@ -2211,10 +2211,10 @@ int __init register_tracer(struct tracer *type) mutex_unlock(&trace_types_lock);
if (ret || !default_bootup_tracer) - goto out_unlock; + return ret;
if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE)) - goto out_unlock; + return 0;
printk(KERN_INFO "Starting tracer '%s'\n", type->name); /* Do we want this tracer to start on bootup? */ @@ -2226,8 +2226,7 @@ int __init register_tracer(struct tracer *type) /* disable other selftests, since this will break it. */ disable_tracing_selftest("running a tracer");
- out_unlock: - return ret; + return 0; }
static void tracing_reset_cpu(struct array_buffer *buf, int cpu) @@ -8734,11 +8733,10 @@ ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash, out_reg: ret = tracing_alloc_snapshot_instance(tr); if (ret < 0) - goto out; + return ret;
ret = register_ftrace_function_probe(glob, tr, ops, count);
- out: return ret < 0 ? ret : 0; }
@@ -10344,7 +10342,7 @@ __init static int tracer_alloc_buffers(void) BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL)) - goto out; + return -ENOMEM;
if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL)) goto out_free_buffer_mask; @@ -10455,7 +10453,6 @@ __init static int tracer_alloc_buffers(void) free_cpumask_var(global_trace.tracing_cpumask); out_free_buffer_mask: free_cpumask_var(tracing_buffer_mask); -out: return ret; }
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index e3afb830fbcc..c91f3c47ac64 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -1153,6 +1153,7 @@ bool ftrace_event_is_function(struct trace_event_call *call); */ struct trace_parser { bool cont; + bool fail; char *buffer; unsigned idx; unsigned size; @@ -1160,7 +1161,7 @@ struct trace_parser {
static inline bool trace_parser_loaded(struct trace_parser *parser) { - return (parser->idx != 0); + return !parser->fail && parser->idx != 0; }
static inline bool trace_parser_cont(struct trace_parser *parser) @@ -1174,6 +1175,11 @@ static inline void trace_parser_clear(struct trace_parser *parser) parser->idx = 0; }
+static inline void trace_parser_fail(struct trace_parser *parser) +{ + parser->fail = true; +} + extern int trace_parser_get_init(struct trace_parser *parser, int size); extern void trace_parser_put(struct trace_parser *parser); extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf, @@ -2053,7 +2059,7 @@ static inline bool is_good_system_name(const char *name) static inline void sanitize_event_name(char *name) { while (*name++ != '\0') - if (*name == ':' || *name == '.') + if (*name == ':' || *name == '.' || *name == '*') *name = '_'; }
diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c index 68af76ca8bc9..0a5454fae316 100644 --- a/mm/debug_vm_pgtable.c +++ b/mm/debug_vm_pgtable.c @@ -1047,29 +1047,34 @@ static void __init destroy_args(struct pgtable_debug_args *args)
/* Free page table entries */ if (args->start_ptep) { + pmd_clear(args->pmdp); pte_free(args->mm, args->start_ptep); mm_dec_nr_ptes(args->mm); }
if (args->start_pmdp) { + pud_clear(args->pudp); pmd_free(args->mm, args->start_pmdp); mm_dec_nr_pmds(args->mm); }
if (args->start_pudp) { + p4d_clear(args->p4dp); pud_free(args->mm, args->start_pudp); mm_dec_nr_puds(args->mm); }
- if (args->start_p4dp) + if (args->start_p4dp) { + pgd_clear(args->pgdp); p4d_free(args->mm, args->start_p4dp); + }
/* Free vma and mm struct */ if (args->vma) vm_area_free(args->vma);
if (args->mm) - mmdrop(args->mm); + mmput(args->mm); }
static struct page * __init diff --git a/mm/filemap.c b/mm/filemap.c index 05eb77623a10..ab24dbf5e747 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3716,7 +3716,7 @@ int generic_file_mmap(struct file *file, struct vm_area_struct *vma) */ int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma) { - if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) + if (vma_is_shared_maywrite(vma)) return -EINVAL; return generic_file_mmap(file, vma); } diff --git a/mm/kmemleak.c b/mm/kmemleak.c index f86d4e04d95e..e2e41de55c02 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -452,6 +452,7 @@ static struct kmemleak_object *mem_pool_alloc(gfp_t gfp) { unsigned long flags; struct kmemleak_object *object; + bool warn = false;
/* try the slab allocator first */ if (object_cache) { @@ -469,8 +470,10 @@ static struct kmemleak_object *mem_pool_alloc(gfp_t gfp) else if (mem_pool_free_count) object = &mem_pool[--mem_pool_free_count]; else - pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n"); + warn = true; raw_spin_unlock_irqrestore(&kmemleak_lock, flags); + if (warn) + pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
return object; } @@ -2006,6 +2009,7 @@ static const struct file_operations kmemleak_fops = { static void __kmemleak_do_cleanup(void) { struct kmemleak_object *object, *tmp; + unsigned int cnt = 0;
/* * Kmemleak has already been disabled, no need for RCU list traversal @@ -2014,6 +2018,10 @@ static void __kmemleak_do_cleanup(void) list_for_each_entry_safe(object, tmp, &object_list, object_list) { __remove_object(object); __delete_object(object); + + /* Call cond_resched() once per 64 iterations to avoid soft lockup */ + if (!(++cnt & 0x3f)) + cond_resched(); } }
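The kmemleak cleanup loop above uses !(++cnt & 0x3f) as a cheap "every Nth iteration" test: 0x3f masks the low six bits, so the expression is true exactly when the counter reaches a multiple of 64, which is when cond_resched() gets a chance to run. The same trick in isolation:

#include <stdio.h>

int main(void)
{
	unsigned int cnt = 0;
	int i;

	/* Print the iterations on which a 64-iteration-period action fires. */
	for (i = 0; i < 200; i++) {
		if (!(++cnt & 0x3f))
			printf("iteration %d: would call cond_resched()\n", i);
	}
	return 0;
}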
diff --git a/mm/madvise.c b/mm/madvise.c index 9d2a6cb655ff..3d6370d3199f 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -987,7 +987,7 @@ static long madvise_remove(struct vm_area_struct *vma, return -EINVAL; }
- if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE)) + if (!vma_is_shared_maywrite(vma)) return -EACCES;
offset = (loff_t)(start - vma->vm_start) diff --git a/mm/memfd.c b/mm/memfd.c index 2dba2cb6f0d0..187265dc68f5 100644 --- a/mm/memfd.c +++ b/mm/memfd.c @@ -134,7 +134,7 @@ static int memfd_wait_for_pins(struct address_space *mapping) return error; }
-static unsigned int *memfd_file_seals_ptr(struct file *file) +unsigned int *memfd_file_seals_ptr(struct file *file) { if (shmem_file(file)) return &SHMEM_I(file_inode(file))->seals; diff --git a/mm/memory-failure.c b/mm/memory-failure.c index a96840c41581..dae5e60d64e2 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -835,9 +835,17 @@ static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask, #define hwpoison_hugetlb_range NULL #endif
+static int hwpoison_test_walk(unsigned long start, unsigned long end, + struct mm_walk *walk) +{ + /* We also want to consider pages mapped into VM_PFNMAP. */ + return 0; +} + static const struct mm_walk_ops hwpoison_walk_ops = { .pmd_entry = hwpoison_pte_range, .hugetlb_entry = hwpoison_hugetlb_range, + .test_walk = hwpoison_test_walk, .walk_lock = PGWALK_RDLOCK, };
diff --git a/mm/mmap.c b/mm/mmap.c index a9c70001e456..8cf23a07ae50 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -47,6 +47,7 @@ #include <linux/oom.h> #include <linux/sched/mm.h> #include <linux/ksm.h> +#include <linux/memfd.h>
#include <linux/uaccess.h> #include <asm/cacheflush.h> @@ -107,7 +108,7 @@ void vma_set_page_prot(struct vm_area_struct *vma) static void __remove_shared_vm_struct(struct vm_area_struct *vma, struct file *file, struct address_space *mapping) { - if (vma->vm_flags & VM_SHARED) + if (vma_is_shared_maywrite(vma)) mapping_unmap_writable(mapping);
flush_dcache_mmap_lock(mapping); @@ -383,7 +384,7 @@ static unsigned long count_vma_pages_range(struct mm_struct *mm, static void __vma_link_file(struct vm_area_struct *vma, struct address_space *mapping) { - if (vma->vm_flags & VM_SHARED) + if (vma_is_shared_maywrite(vma)) mapping_allow_writable(mapping);
flush_dcache_mmap_lock(mapping); @@ -1285,6 +1286,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
if (file) { struct inode *inode = file_inode(file); + unsigned int seals = memfd_file_seals(file); unsigned long flags_mask;
if (!file_mmap_ok(file, inode, pgoff, len)) @@ -1323,6 +1325,8 @@ unsigned long do_mmap(struct file *file, unsigned long addr, vm_flags |= VM_SHARED | VM_MAYSHARE; if (!(file->f_mode & FMODE_WRITE)) vm_flags &= ~(VM_MAYWRITE | VM_SHARED); + else if (is_readonly_sealed(seals, vm_flags)) + vm_flags &= ~VM_MAYWRITE; fallthrough; case MAP_PRIVATE: if (!(file->f_mode & FMODE_READ)) @@ -2845,7 +2849,7 @@ static unsigned long __mmap_region(struct file *file, unsigned long addr, mm->map_count++; if (vma->vm_file) { i_mmap_lock_write(vma->vm_file->f_mapping); - if (vma->vm_flags & VM_SHARED) + if (vma_is_shared_maywrite(vma)) mapping_allow_writable(vma->vm_file->f_mapping);
flush_dcache_mmap_lock(vma->vm_file->f_mapping); @@ -2926,7 +2930,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, return -EINVAL;
/* Map writable and ensure this isn't a sealed memfd. */ - if (file && (vm_flags & VM_SHARED)) { + if (file && is_shared_maywrite(vm_flags)) { int error = mapping_map_writable(file->f_mapping);
if (error) diff --git a/mm/ptdump.c b/mm/ptdump.c index 03c1bdae4a43..e46df2c24d85 100644 --- a/mm/ptdump.c +++ b/mm/ptdump.c @@ -152,6 +152,7 @@ void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd) { const struct ptdump_range *range = st->range;
+ get_online_mems(); mmap_write_lock(mm); while (range->start != range->end) { walk_page_range_novma(mm, range->start, range->end, @@ -159,6 +160,7 @@ void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd) range++; } mmap_write_unlock(mm); + put_online_mems();
/* Flush out the last page */ st->note_page(st, 0, -1, 0); diff --git a/mm/shmem.c b/mm/shmem.c index 283fb62084d4..ecf1011cc3e2 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2396,7 +2396,7 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma) struct shmem_inode_info *info = SHMEM_I(inode); int ret;
- ret = seal_check_future_write(info->seals, vma); + ret = seal_check_write(info->seals, vma); if (ret) return ret;
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 549ee9e87d63..ff9d2520ba74 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -339,7 +339,8 @@ static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data) case BT_CODEC_TRANSPARENT: if (!find_next_esco_param(conn, esco_param_msbc, ARRAY_SIZE(esco_param_msbc))) - return false; + return -EINVAL; + param = &esco_param_msbc[conn->attempt - 1]; cp.tx_coding_format.id = 0x03; cp.rx_coding_format.id = 0x03; diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 3b22ce3aa95b..c06010c0d882 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -6664,8 +6664,8 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data, qos->ucast.out.latency = DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency), 1000); - qos->ucast.in.sdu = le16_to_cpu(ev->c_mtu); - qos->ucast.out.sdu = le16_to_cpu(ev->p_mtu); + qos->ucast.in.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0; + qos->ucast.out.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0; qos->ucast.in.phy = ev->c_phy; qos->ucast.out.phy = ev->p_phy; break; @@ -6679,8 +6679,8 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data, qos->ucast.in.latency = DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency), 1000); - qos->ucast.out.sdu = le16_to_cpu(ev->c_mtu); - qos->ucast.in.sdu = le16_to_cpu(ev->p_mtu); + qos->ucast.out.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0; + qos->ucast.in.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0; qos->ucast.out.phy = ev->c_phy; qos->ucast.in.phy = ev->p_phy; break; diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 69c2ba1e843e..d2613bd3e6db 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -118,7 +118,7 @@ static void hci_sock_free_cookie(struct sock *sk) int id = hci_pi(sk)->cookie;
if (id) { - hci_pi(sk)->cookie = 0xffffffff; + hci_pi(sk)->cookie = 0; ida_free(&sock_cookie_ida, id); } } diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index fa16ee88ec39..f42805d9b38f 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -4807,6 +4807,14 @@ void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx, intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN; }
+ if (intvl_jiffies > BR_MULTICAST_QUERY_INTVL_MAX) { + br_info(brmctx->br, + "trying to set multicast query interval above maximum, setting to %lu (%ums)\n", + jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MAX), + jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MAX)); + intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MAX; + } + brmctx->multicast_query_interval = intvl_jiffies; }
@@ -4823,6 +4831,14 @@ void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx, intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN; }
+ if (intvl_jiffies > BR_MULTICAST_STARTUP_QUERY_INTVL_MAX) { + br_info(brmctx->br, + "trying to set multicast startup query interval above maximum, setting to %lu (%ums)\n", + jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MAX), + jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MAX)); + intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MAX; + } + brmctx->multicast_startup_query_interval = intvl_jiffies; }
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 067d47b8eb8f..ef98ec4c3f51 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -31,6 +31,8 @@ #define BR_MULTICAST_DEFAULT_HASH_MAX 4096 #define BR_MULTICAST_QUERY_INTVL_MIN msecs_to_jiffies(1000) #define BR_MULTICAST_STARTUP_QUERY_INTVL_MIN BR_MULTICAST_QUERY_INTVL_MIN +#define BR_MULTICAST_QUERY_INTVL_MAX msecs_to_jiffies(86400000) /* 24 hours */ +#define BR_MULTICAST_STARTUP_QUERY_INTVL_MAX BR_MULTICAST_QUERY_INTVL_MAX
#define BR_HWDOM_MAX BITS_PER_LONG
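The bridge changes above clamp user-supplied IGMP/MLD query intervals into a sane window; the new upper bound of msecs_to_jiffies(86400000) is 24 hours (86 400 000 ms = 86 400 s). A standalone clamp in the same spirit, in plain milliseconds instead of jiffies:

#include <stdio.h>

#define QUERY_INTVL_MIN_MS	1000UL		/* 1 second */
#define QUERY_INTVL_MAX_MS	86400000UL	/* 24 hours */

static unsigned long clamp_query_interval(unsigned long ms)
{
	if (ms < QUERY_INTVL_MIN_MS) {
		printf("interval below minimum, using %lu ms\n", QUERY_INTVL_MIN_MS);
		return QUERY_INTVL_MIN_MS;
	}
	if (ms > QUERY_INTVL_MAX_MS) {
		printf("interval above maximum, using %lu ms\n", QUERY_INTVL_MAX_MS);
		return QUERY_INTVL_MAX_MS;
	}
	return ms;
}

int main(void)
{
	printf("-> %lu\n", clamp_query_interval(125));			/* too small */
	printf("-> %lu\n", clamp_query_interval(12500));		/* accepted  */
	printf("-> %lu\n", clamp_query_interval(7UL * 86400000));	/* too big   */
	return 0;
}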
diff --git a/net/core/dev.c b/net/core/dev.c index 4006fd164b7b..2d3e0e4130c2 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3559,6 +3559,18 @@ static netdev_features_t gso_features_check(const struct sk_buff *skb, features &= ~NETIF_F_TSO_MANGLEID; }
+ /* NETIF_F_IPV6_CSUM does not support IPv6 extension headers, + * so neither does TSO that depends on it. + */ + if (features & NETIF_F_IPV6_CSUM && + (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6 || + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 && + vlan_get_protocol(skb) == htons(ETH_P_IPV6))) && + skb_transport_header_was_set(skb) && + skb_network_header_len(skb) != sizeof(struct ipv6hdr) && + !ipv6_has_hopopt_jumbo(skb)) + features &= ~(NETIF_F_IPV6_CSUM | NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4); + return features; }
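The gso_features_check() hunk above detects IPv6 extension headers by comparing the network header length against sizeof(struct ipv6hdr): the fixed IPv6 header is 40 bytes, so anything longer means extension headers are present and the NETIF_F_IPV6_CSUM-dependent offloads are dropped for that GSO packet. A userspace check of the same length test, using the libc ip6_hdr definition:

#include <netinet/ip6.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Given the offsets of the L3 and L4 headers inside a packet, decide
 * whether IPv6 extension headers sit between them; this is the same
 * length comparison the kernel performs with skb_network_header_len().
 */
static bool has_ipv6_ext_headers(unsigned int l3_off, unsigned int l4_off)
{
	return (l4_off - l3_off) != sizeof(struct ip6_hdr);
}

int main(void)
{
	/* Ethernet at 0, IPv6 at 14: L4 either right after or further out. */
	printf("l4 at 54: ext headers? %d\n", has_ipv6_ext_headers(14, 54));
	printf("l4 at 62: ext headers? %d\n", has_ipv6_ext_headers(14, 62));
	return 0;
}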
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 1e2e60ffe766..e6b36df482bc 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -55,7 +55,8 @@ static void __neigh_notify(struct neighbour *n, int type, int flags, u32 pid); static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid); static int pneigh_ifdown_and_unlock(struct neigh_table *tbl, - struct net_device *dev); + struct net_device *dev, + bool skip_perm);
#ifdef CONFIG_PROC_FS static const struct seq_operations neigh_stat_seq_ops; @@ -444,7 +445,7 @@ static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev, { write_lock_bh(&tbl->lock); neigh_flush_dev(tbl, dev, skip_perm); - pneigh_ifdown_and_unlock(tbl, dev); + pneigh_ifdown_and_unlock(tbl, dev, skip_perm); pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL, tbl->family); if (skb_queue_empty_lockless(&tbl->proxy_queue)) @@ -845,7 +846,8 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey, }
static int pneigh_ifdown_and_unlock(struct neigh_table *tbl, - struct net_device *dev) + struct net_device *dev, + bool skip_perm) { struct pneigh_entry *n, **np, *freelist = NULL; u32 h; @@ -853,12 +855,15 @@ static int pneigh_ifdown_and_unlock(struct neigh_table *tbl, for (h = 0; h <= PNEIGH_HASHMASK; h++) { np = &tbl->phash_buckets[h]; while ((n = *np) != NULL) { + if (skip_perm && n->permanent) + goto skip; if (!dev || n->dev == dev) { *np = n->next; n->next = freelist; freelist = n; continue; } +skip: np = &n->next; } } @@ -2033,6 +2038,7 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, pn = pneigh_lookup(tbl, net, dst, dev, 1); if (pn) { pn->flags = ndm_flags; + pn->permanent = !!(ndm->ndm_state & NUD_PERMANENT); if (protocol) pn->protocol = protocol; err = 0; diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 70ac9d9bc877..20829e0c36cd 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -467,7 +467,7 @@ static void net_complete_free(void)
}
-static void net_free(struct net *net) +void net_passive_dec(struct net *net) { if (refcount_dec_and_test(&net->passive)) { kfree(rcu_access_pointer(net->gen)); @@ -485,7 +485,7 @@ void net_drop_ns(void *p) struct net *net = (struct net *)p;
if (net) - net_free(net); + net_passive_dec(net); }
struct net *copy_net_ns(unsigned long flags, @@ -527,7 +527,7 @@ struct net *copy_net_ns(unsigned long flags, key_remove_domain(net->key_domain); #endif put_user_ns(user_ns); - net_free(net); + net_passive_dec(net); dec_ucounts: dec_net_namespaces(ucounts); return ERR_PTR(rv); @@ -672,7 +672,7 @@ static void cleanup_net(struct work_struct *work) key_remove_domain(net->key_domain); #endif put_user_ns(net->user_ns); - net_free(net); + net_passive_dec(net); } }
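The net_namespace.c hunk above turns the former net_free() into an exported net_passive_dec(); together with the net_passive_inc() calls added to sk_alloc()/sk_clone_lock() below, kernel sockets that do not hold a full namespace reference still pin the struct net memory. As a rough userspace analogy only (the names and structure below are invented for illustration and are not the kernel implementation), the pattern splits an "active" count that drives teardown from a "passive" count that keeps the memory itself valid:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int active;   /* holders that may actively use the object */
	atomic_int passive;  /* holders that only need the memory valid  */
	int data;
};

static struct obj *obj_new(void)
{
	struct obj *o = calloc(1, sizeof(*o));
	atomic_init(&o->active, 1);
	atomic_init(&o->passive, 1);  /* the active side owns one passive ref */
	return o;
}

static void obj_passive_get(struct obj *o) { atomic_fetch_add(&o->passive, 1); }

static void obj_passive_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->passive, 1) == 1)
		free(o);              /* last passive ref frees the memory */
}

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->active, 1) == 1) {
		printf("tearing down object\n");
		obj_passive_put(o);   /* drop the passive ref the active side owned */
	}
}

int main(void)
{
	struct obj *o = obj_new();
	obj_passive_get(o);  /* e.g. a socket that only needs the pointer to stay valid */
	obj_put(o);          /* active side gone; memory is still alive */
	o->data = 42;        /* still safe to touch */
	obj_passive_put(o);  /* now the memory is freed */
	return 0;
}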
diff --git a/net/core/sock.c b/net/core/sock.c index ec48690b5174..b74bc8175937 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2159,6 +2159,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority, get_net_track(net, &sk->ns_tracker, priority); sock_inuse_add(net, 1); } else { + net_passive_inc(net); __netns_tracker_alloc(net, &sk->ns_tracker, false, priority); } @@ -2183,6 +2184,7 @@ EXPORT_SYMBOL(sk_alloc); static void __sk_destruct(struct rcu_head *head) { struct sock *sk = container_of(head, struct sock, sk_rcu); + struct net *net = sock_net(sk); struct sk_filter *filter;
if (sk->sk_destruct) @@ -2214,14 +2216,28 @@ static void __sk_destruct(struct rcu_head *head) put_cred(sk->sk_peer_cred); put_pid(sk->sk_peer_pid);
- if (likely(sk->sk_net_refcnt)) - put_net_track(sock_net(sk), &sk->ns_tracker); - else - __netns_tracker_free(sock_net(sk), &sk->ns_tracker, false); - + if (likely(sk->sk_net_refcnt)) { + put_net_track(net, &sk->ns_tracker); + } else { + __netns_tracker_free(net, &sk->ns_tracker, false); + net_passive_dec(net); + } sk_prot_free(sk->sk_prot_creator, sk); }
+void sk_net_refcnt_upgrade(struct sock *sk) +{ + struct net *net = sock_net(sk); + + WARN_ON_ONCE(sk->sk_net_refcnt); + __netns_tracker_free(net, &sk->ns_tracker, false); + net_passive_dec(net); + sk->sk_net_refcnt = 1; + get_net_track(net, &sk->ns_tracker, GFP_KERNEL); + sock_inuse_add(net, 1); +} +EXPORT_SYMBOL_GPL(sk_net_refcnt_upgrade); + void sk_destruct(struct sock *sk) { bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE); @@ -2313,6 +2329,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) * is not properly dismantling its kernel sockets at netns * destroy time. */ + net_passive_inc(sock_net(newsk)); __netns_tracker_alloc(sock_net(newsk), &newsk->ns_tracker, false, priority); } diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c index 1b6457f357bd..b8230faa567f 100644 --- a/net/hsr/hsr_slave.c +++ b/net/hsr/hsr_slave.c @@ -62,8 +62,14 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb) skb_push(skb, ETH_HLEN); skb_reset_mac_header(skb); if ((!hsr->prot_version && protocol == htons(ETH_P_PRP)) || - protocol == htons(ETH_P_HSR)) + protocol == htons(ETH_P_HSR)) { + if (!pskb_may_pull(skb, ETH_HLEN + HSR_HLEN)) { + kfree_skb(skb); + goto finish_consume; + } + skb_set_network_header(skb, ETH_HLEN + HSR_HLEN); + } skb_reset_mac_len(skb);
hsr_forward_skb(skb, port); diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c index 675b5bbed638..2d663fe50f87 100644 --- a/net/ipv4/netfilter/nf_reject_ipv4.c +++ b/net/ipv4/netfilter/nf_reject_ipv4.c @@ -247,8 +247,7 @@ void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb, if (!oth) return;
- if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) && - nf_reject_fill_skb_dst(oldskb) < 0) + if (!skb_dst(oldskb) && nf_reject_fill_skb_dst(oldskb) < 0) return;
if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) @@ -321,8 +320,7 @@ void nf_send_unreach(struct sk_buff *skb_in, int code, int hook) if (iph->frag_off & htons(IP_OFFSET)) return;
- if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) && - nf_reject_fill_skb_dst(skb_in) < 0) + if (!skb_dst(skb_in) && nf_reject_fill_skb_dst(skb_in) < 0) return;
if (skb_csum_unnecessary(skb_in) || diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 6ee77f7f9114..8672ebbace98 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2560,7 +2560,6 @@ static struct rtable *__mkroute_output(const struct fib_result *res, do_cache = true; if (type == RTN_BROADCAST) { flags |= RTCF_BROADCAST | RTCF_LOCAL; - fi = NULL; } else if (type == RTN_MULTICAST) { flags |= RTCF_MULTICAST | RTCF_LOCAL; if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr, diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 3870b59f5400..9be9df2caf65 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -61,7 +61,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM); skb->remcsum_offload = remcsum;
- need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb)); + need_ipsec = (skb_dst(skb) && dst_xfrm(skb_dst(skb))) || skb_sec_path(skb); /* Try to offload checksum if possible */ offload_csum = !!(need_csum && !need_ipsec && diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index f6188bd9f55b..1c3b0ba289fb 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -2193,13 +2193,12 @@ void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp) in6_ifa_put(ifp); }
-/* Join to solicited addr multicast group. - * caller must hold RTNL */ +/* Join to solicited addr multicast group. */ void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr) { struct in6_addr maddr;
- if (dev->flags&(IFF_LOOPBACK|IFF_NOARP)) + if (READ_ONCE(dev->flags) & (IFF_LOOPBACK | IFF_NOARP)) return;
addrconf_addr_solict_mult(addr, &maddr); @@ -3834,7 +3833,7 @@ static int addrconf_ifdown(struct net_device *dev, bool unregister) * Do not dev_put! */ if (unregister) { - idev->dead = 1; + WRITE_ONCE(idev->dead, 1);
/* protected by rtnl_lock */ RCU_INIT_POINTER(dev->ip6_ptr, NULL); diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index e153dac47a53..160b452f75e7 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -906,23 +906,22 @@ static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev, static int __ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr, unsigned int mode) { - struct ifmcaddr6 *mc; struct inet6_dev *idev; - - ASSERT_RTNL(); + struct ifmcaddr6 *mc;
/* we need to take a reference on idev */ idev = in6_dev_get(dev); - if (!idev) return -EINVAL;
- if (idev->dead) { + mutex_lock(&idev->mc_lock); + + if (READ_ONCE(idev->dead)) { + mutex_unlock(&idev->mc_lock); in6_dev_put(idev); return -ENODEV; }
- mutex_lock(&idev->mc_lock); for_each_mc_mclock(idev, mc) { if (ipv6_addr_equal(&mc->mca_addr, addr)) { mc->mca_users++; diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c index e4776bd2ed89..f3579bccf0a5 100644 --- a/net/ipv6/netfilter/nf_reject_ipv6.c +++ b/net/ipv6/netfilter/nf_reject_ipv6.c @@ -293,7 +293,7 @@ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb, fl6.fl6_sport = otcph->dest; fl6.fl6_dport = otcph->source;
- if (hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) { + if (!skb_dst(oldskb)) { nf_ip6_route(net, &dst, flowi6_to_flowi(&fl6), false); if (!dst) return; @@ -397,8 +397,7 @@ void nf_send_unreach6(struct net *net, struct sk_buff *skb_in, if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL) skb_in->dev = net->loopback_dev;
- if ((hooknum == NF_INET_PRE_ROUTING || hooknum == NF_INET_INGRESS) && - nf_reject6_fill_skb_dst(skb_in) < 0) + if (!skb_dst(skb_in) && nf_reject6_fill_skb_dst(skb_in) < 0) return;
icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0); diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c index 3c3800223e0e..6e15a65faecc 100644 --- a/net/ipv6/seg6_hmac.c +++ b/net/ipv6/seg6_hmac.c @@ -35,6 +35,7 @@ #include <net/xfrm.h>
#include <crypto/hash.h> +#include <crypto/utils.h> #include <net/seg6.h> #include <net/genetlink.h> #include <net/seg6_hmac.h> @@ -269,7 +270,7 @@ bool seg6_hmac_validate_skb(struct sk_buff *skb) if (seg6_hmac_compute(hinfo, srh, &ipv6_hdr(skb)->saddr, hmac_output)) return false;
- if (memcmp(hmac_output, tlv->hmac, SEG6_HMAC_FIELD_LEN) != 0) + if (crypto_memneq(hmac_output, tlv->hmac, SEG6_HMAC_FIELD_LEN)) return false;
return true; @@ -293,6 +294,9 @@ int seg6_hmac_info_add(struct net *net, u32 key, struct seg6_hmac_info *hinfo) struct seg6_pernet_data *sdata = seg6_pernet(net); int err;
+ if (!__hmac_get_algo(hinfo->alg_id)) + return -EINVAL; + err = rhashtable_lookup_insert_fast(&sdata->hmac_infos, &hinfo->node, rht_params);
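The seg6_hmac change above replaces memcmp() with crypto_memneq() so HMAC verification does not leak, through its timing, how many leading bytes matched. crypto_memneq() is the in-kernel helper; purely as an illustration of the idea (not the kernel code), a fixed-time comparison accumulates the XOR of every byte pair and only inspects the result once at the end:

#include <stddef.h>
#include <stdio.h>

/* Returns nonzero if the buffers differ; runs in time independent of where
 * the first mismatch occurs (memcmp implementations may return early). */
static int ct_memneq(const void *a, const void *b, size_t len)
{
	const unsigned char *pa = a, *pb = b;
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < len; i++)
		diff |= pa[i] ^ pb[i];

	return diff != 0;
}

int main(void)
{
	unsigned char expected[32] = { 0xaa, 0xbb };  /* stand-in for a computed HMAC */
	unsigned char received[32] = { 0xaa, 0xcc };  /* stand-in for the TLV value   */

	if (ct_memneq(expected, received, sizeof(expected)))
		puts("HMAC mismatch");
	else
		puts("HMAC ok");
	return 0;
}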
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 3ff7f38394a6..1addfba4b285 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1847,12 +1847,12 @@ static int sta_link_apply_parameters(struct ieee80211_local *local, }
if (params->supported_rates && - params->supported_rates_len) { - ieee80211_parse_bitrates(link->conf->chandef.width, - sband, params->supported_rates, - params->supported_rates_len, - &link_sta->pub->supp_rates[sband->band]); - } + params->supported_rates_len && + !ieee80211_parse_bitrates(link->conf->chandef.width, + sband, params->supported_rates, + params->supported_rates_len, + &link_sta->pub->supp_rates[sband->band])) + return -EINVAL;
if (params->ht_capa) ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index 31c4f112345e..4a21e53afa72 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -1313,6 +1313,7 @@ ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link) goto out; }
+ link->radar_required = link->reserved_radar_required; list_move(&link->assigned_chanctx_list, &new_ctx->assigned_links); rcu_assign_pointer(link_conf->chanctx_conf, &new_ctx->conf);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 2c7e139efd53..295c2fdbd3c7 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -3662,6 +3662,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, struct ieee80211_prep_tx_info info = { .subtype = IEEE80211_STYPE_AUTH, }; + bool sae_need_confirm = false;
sdata_assert_lock(sdata);
@@ -3705,6 +3706,8 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, jiffies + IEEE80211_AUTH_WAIT_SAE_RETRY; ifmgd->auth_data->timeout_started = true; run_again(sdata, ifmgd->auth_data->timeout); + if (auth_transaction == 1) + sae_need_confirm = true; goto notify_driver; }
@@ -3747,6 +3750,9 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, ifmgd->auth_data->expected_transaction == 2)) { if (!ieee80211_mark_sta_auth(sdata)) return; /* ignore frame -- wait for timeout */ + } else if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE && + auth_transaction == 1) { + sae_need_confirm = true; } else if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE && auth_transaction == 2) { sdata_info(sdata, "SAE peer confirmed\n"); @@ -3755,7 +3761,8 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); notify_driver: - drv_mgd_complete_tx(sdata->local, sdata, &info); + if (!sae_need_confirm) + drv_mgd_complete_tx(sdata->local, sdata, &info); }
#define case_WLAN(type) \ diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 58665b6ae635..210337ef23cf 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -4221,10 +4221,16 @@ static bool ieee80211_rx_data_set_sta(struct ieee80211_rx_data *rx, rx->link_sta = NULL; }
- if (link_id < 0) - rx->link = &rx->sdata->deflink; - else if (!ieee80211_rx_data_set_link(rx, link_id)) + if (link_id < 0) { + if (ieee80211_vif_is_mld(&rx->sdata->vif) && + sta && !sta->sta.valid_links) + rx->link = + rcu_dereference(rx->sdata->link[sta->deflink.link_id]); + else + rx->link = &rx->sdata->deflink; + } else if (!ieee80211_rx_data_set_link(rx, link_id)) { return false; + }
return true; } diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c index 8032cfba22d1..5f9592fb57ad 100644 --- a/net/mctp/af_mctp.c +++ b/net/mctp/af_mctp.c @@ -73,7 +73,6 @@ static int mctp_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
lock_sock(sk);
- /* TODO: allow rebind */ if (sk_hashed(sk)) { rc = -EADDRINUSE; goto out_release; @@ -549,15 +548,36 @@ static void mctp_sk_close(struct sock *sk, long timeout) static int mctp_sk_hash(struct sock *sk) { struct net *net = sock_net(sk); + struct sock *existing; + struct mctp_sock *msk; + int rc; + + msk = container_of(sk, struct mctp_sock, sk);
/* Bind lookup runs under RCU, remain live during that. */ sock_set_flag(sk, SOCK_RCU_FREE);
mutex_lock(&net->mctp.bind_lock); + + /* Prevent duplicate binds. */ + sk_for_each(existing, &net->mctp.binds) { + struct mctp_sock *mex = + container_of(existing, struct mctp_sock, sk); + + if (mex->bind_type == msk->bind_type && + mex->bind_addr == msk->bind_addr && + mex->bind_net == msk->bind_net) { + rc = -EADDRINUSE; + goto out; + } + } + sk_add_node_rcu(sk, &net->mctp.binds); - mutex_unlock(&net->mctp.bind_lock); + rc = 0;
- return 0; +out: + mutex_unlock(&net->mctp.bind_lock); + return rc; }
static void mctp_sk_unhash(struct sock *sk) diff --git a/net/mptcp/options.c b/net/mptcp/options.c index 8d4889a73006..9406d2d555e7 100644 --- a/net/mptcp/options.c +++ b/net/mptcp/options.c @@ -1117,7 +1117,9 @@ static bool add_addr_hmac_valid(struct mptcp_sock *msk, return hmac == mp_opt->ahmac; }
-/* Return false if a subflow has been reset, else return true */ +/* Return false in case of error (or subflow has been reset), + * else return true. + */ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); @@ -1221,7 +1223,7 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
mpext = skb_ext_add(skb, SKB_EXT_MPTCP); if (!mpext) - return true; + return false;
memset(mpext, 0, sizeof(*mpext));
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index f7257de37bd0..e8042014bd5f 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -294,6 +294,7 @@ static void mptcp_pm_add_timer(struct timer_list *timer) struct mptcp_pm_add_entry *entry = from_timer(entry, timer, add_timer); struct mptcp_sock *msk = entry->sock; struct sock *sk = (struct sock *)msk; + unsigned int timeout;
pr_debug("msk=%p\n", msk);
@@ -311,6 +312,10 @@ static void mptcp_pm_add_timer(struct timer_list *timer) goto out; }
+ timeout = mptcp_get_add_addr_timeout(sock_net(sk)); + if (!timeout) + goto out; + spin_lock_bh(&msk->pm.lock);
if (!mptcp_pm_should_add_signal_addr(msk)) { @@ -322,7 +327,7 @@ static void mptcp_pm_add_timer(struct timer_list *timer)
if (entry->retrans_times < ADD_ADDR_RETRANS_MAX) sk_reset_timer(sk, timer, - jiffies + mptcp_get_add_addr_timeout(sock_net(sk))); + jiffies + timeout);
spin_unlock_bh(&msk->pm.lock);
@@ -364,6 +369,7 @@ bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk, struct mptcp_pm_add_entry *add_entry = NULL; struct sock *sk = (struct sock *)msk; struct net *net = sock_net(sk); + unsigned int timeout;
lockdep_assert_held(&msk->pm.lock);
@@ -373,9 +379,7 @@ bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk, if (WARN_ON_ONCE(mptcp_pm_is_kernel(msk))) return false;
- sk_reset_timer(sk, &add_entry->add_timer, - jiffies + mptcp_get_add_addr_timeout(net)); - return true; + goto reset_timer; }
add_entry = kmalloc(sizeof(*add_entry), GFP_ATOMIC); @@ -389,8 +393,10 @@ bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk, add_entry->retrans_times = 0;
timer_setup(&add_entry->add_timer, mptcp_pm_add_timer, 0); - sk_reset_timer(sk, &add_entry->add_timer, - jiffies + mptcp_get_add_addr_timeout(net)); +reset_timer: + timeout = mptcp_get_add_addr_timeout(net); + if (timeout) + sk_reset_timer(sk, &add_entry->add_timer, jiffies + timeout);
return true; } @@ -1783,7 +1789,6 @@ static void __flush_addrs(struct list_head *list) static void __reset_counters(struct pm_nl_pernet *pernet) { WRITE_ONCE(pernet->add_addr_signal_max, 0); - WRITE_ONCE(pernet->add_addr_accept_max, 0); WRITE_ONCE(pernet->local_addr_max, 0); pernet->addrs = 0; } diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index a01ea18283c7..0c9b9c0c277c 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -1715,10 +1715,7 @@ int mptcp_subflow_create_socket(struct sock *sk, unsigned short family, * needs it. * Update ns_tracker to current stack trace and refcounted tracker. */ - __netns_tracker_free(net, &sf->sk->ns_tracker, false); - sf->sk->sk_net_refcnt = 1; - get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL); - sock_inuse_add(net, 1); + sk_net_refcnt_upgrade(sf->sk); err = tcp_set_ulp(sf->sk, "mptcp");
release_ssk: diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h index 2c260f33b55c..ad1f671ffc37 100644 --- a/net/ncsi/internal.h +++ b/net/ncsi/internal.h @@ -110,7 +110,7 @@ struct ncsi_channel_version { u8 update; /* NCSI version update */ char alpha1; /* NCSI version alpha1 */ char alpha2; /* NCSI version alpha2 */ - u8 fw_name[12]; /* Firmware name string */ + u8 fw_name[12 + 1]; /* Firmware name string */ u32 fw_version; /* Firmware version */ u16 pci_ids[4]; /* PCI identification */ u32 mf_id; /* Manufacture ID */ diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c index 8668888c5a2f..d5ed80731e89 100644 --- a/net/ncsi/ncsi-rsp.c +++ b/net/ncsi/ncsi-rsp.c @@ -775,6 +775,7 @@ static int ncsi_rsp_handler_gvi(struct ncsi_request *nr) ncv->alpha1 = rsp->alpha1; ncv->alpha2 = rsp->alpha2; memcpy(ncv->fw_name, rsp->fw_name, 12); + ncv->fw_name[12] = '\0'; ncv->fw_version = ntohl(rsp->fw_version); for (i = 0; i < ARRAY_SIZE(ncv->pci_ids); i++) ncv->pci_ids[i] = ntohs(rsp->pci_ids[i]); diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 282e9644f6fd..928bd2013289 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -859,8 +859,6 @@ ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item)
static int ctnetlink_done(struct netlink_callback *cb) { - if (cb->args[1]) - nf_ct_put((struct nf_conn *)cb->args[1]); kfree(cb->data); return 0; } @@ -1175,19 +1173,26 @@ static int ctnetlink_filter_match(struct nf_conn *ct, void *data) return 0; }
+static unsigned long ctnetlink_get_id(const struct nf_conn *ct) +{ + unsigned long id = nf_ct_get_id(ct); + + return id ? id : 1; +} + static int ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) { unsigned int flags = cb->data ? NLM_F_DUMP_FILTERED : 0; struct net *net = sock_net(skb->sk); - struct nf_conn *ct, *last; + unsigned long last_id = cb->args[1]; struct nf_conntrack_tuple_hash *h; struct hlist_nulls_node *n; struct nf_conn *nf_ct_evict[8]; + struct nf_conn *ct; int res, i; spinlock_t *lockp;
- last = (struct nf_conn *)cb->args[1]; i = 0;
local_bh_disable(); @@ -1224,7 +1229,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) continue;
if (cb->args[1]) { - if (ct != last) + if (ctnetlink_get_id(ct) != last_id) continue; cb->args[1] = 0; } @@ -1237,8 +1242,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) NFNL_MSG_TYPE(cb->nlh->nlmsg_type), ct, true, flags); if (res < 0) { - nf_conntrack_get(&ct->ct_general); - cb->args[1] = (unsigned long)ct; + cb->args[1] = ctnetlink_get_id(ct); spin_unlock(lockp); goto out; } @@ -1251,12 +1255,10 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) } out: local_bh_enable(); - if (last) { + if (last_id) { /* nf ct hash resize happened, now clear the leftover. */ - if ((struct nf_conn *)cb->args[1] == last) + if (cb->args[1] == last_id) cb->args[1] = 0; - - nf_ct_put(last); }
while (i) { diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 0a412d9a8e5f..a5ffda87daf6 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -798,16 +798,6 @@ static int netlink_release(struct socket *sock)
sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
- /* Because struct net might disappear soon, do not keep a pointer. */ - if (!sk->sk_net_refcnt && sock_net(sk) != &init_net) { - __netns_tracker_free(sock_net(sk), &sk->ns_tracker, false); - /* Because of deferred_put_nlk_sk and use of work queue, - * it is possible netns will be freed before this socket. - */ - sock_net_set(sk, &init_net); - __netns_tracker_alloc(&init_net, &sk->ns_tracker, - false, GFP_KERNEL); - } call_rcu(&nlk->rcu, deferred_put_nlk_sk); return 0; } @@ -1229,7 +1219,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, nlk = nlk_sk(sk); rmem = atomic_add_return(skb->truesize, &sk->sk_rmem_alloc);
- if ((rmem == skb->truesize || rmem < READ_ONCE(sk->sk_rcvbuf)) && + if ((rmem == skb->truesize || rmem <= READ_ONCE(sk->sk_rcvbuf)) && !test_bit(NETLINK_S_CONGESTED, &nlk->state)) { netlink_skb_set_owner_r(skb, sk); return 0; diff --git a/net/rds/tcp.c b/net/rds/tcp.c index 2dba7505b414..985b05f38b67 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -503,12 +503,8 @@ bool rds_tcp_tune(struct socket *sock) release_sock(sk); return false; } - /* Update ns_tracker to current stack trace and refcounted tracker */ - __netns_tracker_free(net, &sk->ns_tracker, false); - - sk->sk_net_refcnt = 1; - netns_tracker_alloc(net, &sk->ns_tracker, GFP_KERNEL); - sock_inuse_add(net, 1); + sk_net_refcnt_upgrade(sk); + put_net(net); } rtn = net_generic(net, rds_tcp_netid); if (rtn->sndbuf_size > 0) { diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 09242578dac5..85984c91cf51 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -1762,7 +1762,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, ktime_t now = ktime_get(); struct cake_tin_data *b; struct cake_flow *flow; - u32 idx; + u32 idx, tin;
/* choose flow to insert into */ idx = cake_classify(sch, &b, skb, q->flow_mode, &ret); @@ -1772,6 +1772,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, __qdisc_drop(skb, to_free); return ret; } + tin = (u32)(b - q->tins); idx--; flow = &b->flows[idx];
@@ -1939,13 +1940,22 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, q->buffer_max_used = q->buffer_used;
if (q->buffer_used > q->buffer_limit) { + bool same_flow = false; u32 dropped = 0; + u32 drop_id;
while (q->buffer_used > q->buffer_limit) { dropped++; - cake_drop(sch, to_free); + drop_id = cake_drop(sch, to_free); + + if ((drop_id >> 16) == tin && + (drop_id & 0xFFFF) == idx) + same_flow = true; } b->drop_overlimit += dropped; + + if (same_flow) + return NET_XMIT_CN; } return NET_XMIT_SUCCESS; } diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c index 3ee46f6e005d..9873f4ae90c3 100644 --- a/net/sched/sch_ets.c +++ b/net/sched/sch_ets.c @@ -651,23 +651,24 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
sch_tree_lock(sch);
- q->nbands = nbands; + for (i = nbands; i < oldbands; i++) { + if (i >= q->nstrict && q->classes[i].qdisc->q.qlen) + list_del_init(&q->classes[i].alist); + qdisc_purge_queue(q->classes[i].qdisc); + } + + WRITE_ONCE(q->nbands, nbands); for (i = nstrict; i < q->nstrict; i++) { if (q->classes[i].qdisc->q.qlen) { list_add_tail(&q->classes[i].alist, &q->active); q->classes[i].deficit = quanta[i]; } } - for (i = q->nbands; i < oldbands; i++) { - if (i >= q->nstrict && q->classes[i].qdisc->q.qlen) - list_del_init(&q->classes[i].alist); - qdisc_purge_queue(q->classes[i].qdisc); - } - q->nstrict = nstrict; + WRITE_ONCE(q->nstrict, nstrict); memcpy(q->prio2band, priomap, sizeof(priomap));
for (i = 0; i < q->nbands; i++) - q->classes[i].quantum = quanta[i]; + WRITE_ONCE(q->classes[i].quantum, quanta[i]);
for (i = oldbands; i < q->nbands; i++) { q->classes[i].qdisc = queues[i]; @@ -681,7 +682,7 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt, for (i = q->nbands; i < oldbands; i++) { qdisc_put(q->classes[i].qdisc); q->classes[i].qdisc = NULL; - q->classes[i].quantum = 0; + WRITE_ONCE(q->classes[i].quantum, 0); q->classes[i].deficit = 0; gnet_stats_basic_sync_init(&q->classes[i].bstats); memset(&q->classes[i].qstats, 0, sizeof(q->classes[i].qstats)); @@ -738,6 +739,7 @@ static int ets_qdisc_dump(struct Qdisc *sch, struct sk_buff *skb) struct ets_sched *q = qdisc_priv(sch); struct nlattr *opts; struct nlattr *nest; + u8 nbands, nstrict; int band; int prio; int err; @@ -750,21 +752,22 @@ static int ets_qdisc_dump(struct Qdisc *sch, struct sk_buff *skb) if (!opts) goto nla_err;
- if (nla_put_u8(skb, TCA_ETS_NBANDS, q->nbands)) + nbands = READ_ONCE(q->nbands); + if (nla_put_u8(skb, TCA_ETS_NBANDS, nbands)) goto nla_err;
- if (q->nstrict && - nla_put_u8(skb, TCA_ETS_NSTRICT, q->nstrict)) + nstrict = READ_ONCE(q->nstrict); + if (nstrict && nla_put_u8(skb, TCA_ETS_NSTRICT, nstrict)) goto nla_err;
- if (q->nbands > q->nstrict) { + if (nbands > nstrict) { nest = nla_nest_start(skb, TCA_ETS_QUANTA); if (!nest) goto nla_err;
- for (band = q->nstrict; band < q->nbands; band++) { + for (band = nstrict; band < nbands; band++) { if (nla_put_u32(skb, TCA_ETS_QUANTA_BAND, - q->classes[band].quantum)) + READ_ONCE(q->classes[band].quantum))) goto nla_err; }
@@ -776,7 +779,8 @@ static int ets_qdisc_dump(struct Qdisc *sch, struct sk_buff *skb) goto nla_err;
for (prio = 0; prio <= TC_PRIO_MAX; prio++) { - if (nla_put_u8(skb, TCA_ETS_PRIOMAP_BAND, q->prio2band[prio])) + if (nla_put_u8(skb, TCA_ETS_PRIOMAP_BAND, + READ_ONCE(q->prio2band[prio]))) goto nla_err; }
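The sch_ets change above wraps the fields that ets_qdisc_dump() reads without the qdisc tree lock in WRITE_ONCE()/READ_ONCE() pairs, so a concurrent dumper sees each value as a single whole store rather than a torn or re-fetched read. Those are kernel macros; as a loose userspace analogy only (the kernel primitives are not C11 atomics, and the names below are invented), relaxed atomics give a similar single-copy load/store guarantee (build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Configuration a writer updates while readers dump it without a shared lock. */
static _Atomic unsigned int nbands = 8;

static void *dumper(void *arg)
{
	(void)arg;
	for (int i = 0; i < 5; i++) {
		/* analogous to READ_ONCE(q->nbands): one load, no tearing */
		unsigned int n = atomic_load_explicit(&nbands, memory_order_relaxed);
		printf("dump sees %u bands\n", n);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, dumper, NULL);

	/* analogous to WRITE_ONCE(q->nbands, nbands): one store, no tearing */
	atomic_store_explicit(&nbands, 4, memory_order_relaxed);

	pthread_join(t, NULL);
	return 0;
}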
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 113b305b0d15..c8a426062923 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -592,7 +592,7 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff) */ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl) { - WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen); + WARN_ON(cl->level || !cl->leaf.q);
if (!cl->prio_activity) { cl->prio_activity = 1 << cl->prio; diff --git a/net/sctp/input.c b/net/sctp/input.c index a8a254a5008e..032a10d82302 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -117,7 +117,7 @@ int sctp_rcv(struct sk_buff *skb) * it's better to just linearize it otherwise crc computing * takes longer. */ - if ((!is_gso && skb_linearize(skb)) || + if (((!is_gso || skb_cloned(skb)) && skb_linearize(skb)) || !pskb_may_pull(skb, sizeof(struct sctphdr))) goto discard_it;
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 45efbbfff94a..b3bfd0f18d41 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -2553,8 +2553,9 @@ static void smc_listen_work(struct work_struct *work) goto out_decl; }
- smc_listen_out_connected(new_smc); SMC_STAT_SERV_SUCC_INC(sock_net(newclcsock->sk), ini); + /* smc_listen_out() will release smcsk */ + smc_listen_out_connected(new_smc); goto out_free;
out_unlock: @@ -3343,10 +3344,7 @@ int smc_create_clcsk(struct net *net, struct sock *sk, int family) * which need net ref. */ sk = smc->clcsock->sk; - __netns_tracker_free(net, &sk->ns_tracker, false); - sk->sk_net_refcnt = 1; - get_net_track(net, &sk->ns_tracker, GFP_KERNEL); - sock_inuse_add(net, 1); + sk_net_refcnt_upgrade(sk); return 0; }
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 7229b4a9ad1d..78b139d8c1f3 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -1579,10 +1579,7 @@ static struct svc_xprt *svc_create_socket(struct svc_serv *serv, newlen = error;
if (protocol == IPPROTO_TCP) { - __netns_tracker_free(net, &sock->sk->ns_tracker, false); - sock->sk->sk_net_refcnt = 1; - get_net_track(net, &sock->sk->ns_tracker, GFP_KERNEL); - sock_inuse_add(net, 1); + sk_net_refcnt_upgrade(sock->sk); if ((error = kernel_listen(sock, 64)) < 0) goto bummer; } diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 99bb3e762af4..8b27a21f3b42 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1941,12 +1941,8 @@ static struct socket *xs_create_sock(struct rpc_xprt *xprt, goto out; }
- if (protocol == IPPROTO_TCP) { - __netns_tracker_free(xprt->xprt_net, &sock->sk->ns_tracker, false); - sock->sk->sk_net_refcnt = 1; - get_net_track(xprt->xprt_net, &sock->sk->ns_tracker, GFP_KERNEL); - sock_inuse_add(xprt->xprt_net, 1); - } + if (protocol == IPPROTO_TCP) + sk_net_refcnt_upgrade(sock->sk);
filp = sock_alloc_file(sock, O_NONBLOCK, NULL); if (IS_ERR(filp)) diff --git a/net/tls/tls.h b/net/tls/tls.h index 02038d0381b7..5dc61c85c076 100644 --- a/net/tls/tls.h +++ b/net/tls/tls.h @@ -192,7 +192,7 @@ void tls_strp_msg_done(struct tls_strparser *strp); int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb); void tls_rx_msg_ready(struct tls_strparser *strp);
-void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh); +bool tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh); int tls_strp_msg_cow(struct tls_sw_context_rx *ctx); struct sk_buff *tls_strp_msg_detach(struct tls_sw_context_rx *ctx); int tls_strp_msg_hold(struct tls_strparser *strp, struct sk_buff_head *dst); diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c index bea60b0160d1..6ce64a6e4495 100644 --- a/net/tls/tls_strp.c +++ b/net/tls/tls_strp.c @@ -474,7 +474,7 @@ static void tls_strp_load_anchor_with_queue(struct tls_strparser *strp, int len) strp->stm.offset = offset; }
-void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh) +bool tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh) { struct strp_msg *rxm; struct tls_msg *tlm; @@ -483,8 +483,11 @@ void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh) DEBUG_NET_WARN_ON_ONCE(!strp->stm.full_len);
if (!strp->copy_mode && force_refresh) { - if (WARN_ON(tcp_inq(strp->sk) < strp->stm.full_len)) - return; + if (unlikely(tcp_inq(strp->sk) < strp->stm.full_len)) { + WRITE_ONCE(strp->msg_ready, 0); + memset(&strp->stm, 0, sizeof(strp->stm)); + return false; + }
tls_strp_load_anchor_with_queue(strp, strp->stm.full_len); } @@ -494,6 +497,8 @@ void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh) rxm->offset = strp->stm.offset; tlm = tls_msg(strp->anchor); tlm->control = strp->mark; + + return true; }
/* Called with lock held on lower socket */ diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 4905a81c4ac1..27ce1feb79e1 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -1380,7 +1380,8 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock, return sock_intr_errno(timeo); }
- tls_strp_msg_load(&ctx->strp, released); + if (unlikely(!tls_strp_msg_load(&ctx->strp, released))) + return tls_rx_rec_wait(sk, psock, nonblock, false);
return 1; } @@ -1773,6 +1774,9 @@ int decrypt_skb(struct sock *sk, struct scatterlist *sgout) return tls_decrypt_sg(sk, NULL, sgout, &darg); }
+/* All records returned from a recvmsg() call must have the same type. + * 0 is not a valid content type. Use it as "no type reported, yet". + */ static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm, u8 *control) { @@ -2016,8 +2020,10 @@ int tls_sw_recvmsg(struct sock *sk, if (err < 0) goto end;
+ /* process_rx_list() will set @control if it processed any records */ copied = err; - if (len <= copied || (copied && control != TLS_RECORD_TYPE_DATA) || rx_more) + if (len <= copied || rx_more || + (control && control != TLS_RECORD_TYPE_DATA)) goto end;
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index 2925f5d27ad3..e1d7ce8dac08 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c @@ -221,7 +221,7 @@ virtio_transport_cancel_pkt(struct vsock_sock *vsk)
static void virtio_vsock_rx_fill(struct virtio_vsock *vsock) { - int total_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM; + int total_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE; struct scatterlist pkt, *p; struct virtqueue *vq; struct sk_buff *skb; @@ -497,8 +497,9 @@ static void virtio_transport_rx_work(struct work_struct *work) do { virtqueue_disable_cb(vq); for (;;) { + unsigned int len, payload_len; + struct virtio_vsock_hdr *hdr; struct sk_buff *skb; - unsigned int len;
if (!virtio_transport_more_replies(vsock)) { /* Stop rx until the device processes already @@ -515,12 +516,19 @@ static void virtio_transport_rx_work(struct work_struct *work) vsock->rx_buf_nr--;
/* Drop short/long packets */ - if (unlikely(len < sizeof(struct virtio_vsock_hdr) || + if (unlikely(len < sizeof(*hdr) || len > virtio_vsock_skb_len(skb))) { kfree_skb(skb); continue; }
+ hdr = virtio_vsock_hdr(skb); + payload_len = le32_to_cpu(hdr->len); + if (unlikely(payload_len > len - sizeof(*hdr))) { + kfree_skb(skb); + continue; + } + virtio_vsock_skb_rx_put(skb); virtio_transport_deliver_tap_pkt(skb); virtio_transport_recv_pkt(&virtio_transport, skb); diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 55a1d3633853..3d631f8073f0 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -739,7 +739,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
mgmt = (const struct ieee80211_mgmt *)params->buf;
- if (!ieee80211_is_mgmt(mgmt->frame_control)) + if (!ieee80211_is_mgmt(mgmt->frame_control) || + ieee80211_has_order(mgmt->frame_control)) return -EINVAL;
stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE; diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index d2bd5bddfb05..acfbe1f013d1 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1466,6 +1466,26 @@ struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi, } EXPORT_SYMBOL(xfrm_state_lookup_byspi);
+static struct xfrm_state *xfrm_state_lookup_spi_proto(struct net *net, __be32 spi, u8 proto) +{ + struct xfrm_state *x; + unsigned int i; + + rcu_read_lock(); + for (i = 0; i <= net->xfrm.state_hmask; i++) { + hlist_for_each_entry_rcu(x, &net->xfrm.state_byspi[i], byspi) { + if (x->id.spi == spi && x->id.proto == proto) { + if (!xfrm_state_hold_rcu(x)) + continue; + rcu_read_unlock(); + return x; + } + } + } + rcu_read_unlock(); + return NULL; +} + static void __xfrm_state_insert(struct xfrm_state *x) { struct net *net = xs_net(x); @@ -2259,10 +2279,8 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high, unsigned int h; struct xfrm_state *x0; int err = -ENOENT; - __be32 minspi = htonl(low); - __be32 maxspi = htonl(high); + u32 range = high - low + 1; __be32 newspi = 0; - u32 mark = x->mark.v & x->mark.m;
spin_lock_bh(&x->lock); if (x->km.state == XFRM_STATE_DEAD) { @@ -2276,38 +2294,34 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high,
err = -ENOENT;
- if (minspi == maxspi) { - x0 = xfrm_state_lookup(net, mark, &x->id.daddr, minspi, x->id.proto, x->props.family); - if (x0) { - NL_SET_ERR_MSG(extack, "Requested SPI is already in use"); - xfrm_state_put(x0); + for (h = 0; h < range; h++) { + u32 spi = (low == high) ? low : get_random_u32_inclusive(low, high); + newspi = htonl(spi); + + spin_lock_bh(&net->xfrm.xfrm_state_lock); + x0 = xfrm_state_lookup_spi_proto(net, newspi, x->id.proto); + if (!x0) { + x->id.spi = newspi; + h = xfrm_spi_hash(net, &x->id.daddr, newspi, x->id.proto, x->props.family); + XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h, x->xso.type); + spin_unlock_bh(&net->xfrm.xfrm_state_lock); + err = 0; goto unlock; } - newspi = minspi; - } else { - u32 spi = 0; - for (h = 0; h < high-low+1; h++) { - spi = get_random_u32_inclusive(low, high); - x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family); - if (x0 == NULL) { - newspi = htonl(spi); - break; - } - xfrm_state_put(x0); + xfrm_state_put(x0); + spin_unlock_bh(&net->xfrm.xfrm_state_lock); + + if (signal_pending(current)) { + err = -ERESTARTSYS; + goto unlock; } + + if (low == high) + break; } - if (newspi) { - spin_lock_bh(&net->xfrm.xfrm_state_lock); - x->id.spi = newspi; - h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family); - XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h, - x->xso.type); - spin_unlock_bh(&net->xfrm.xfrm_state_lock);
- err = 0; - } else { + if (err) NL_SET_ERR_MSG(extack, "No SPI available in the requested range"); - }
unlock: spin_unlock_bh(&x->lock); diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c index 9e52c7360e55..2b99d18e703d 100644 --- a/scripts/kconfig/gconf.c +++ b/scripts/kconfig/gconf.c @@ -780,7 +780,7 @@ static void renderer_edited(GtkCellRendererText * cell, struct symbol *sym;
if (!gtk_tree_model_get_iter(model2, &iter, path)) - return; + goto free;
gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1); sym = menu->sym; @@ -792,6 +792,7 @@ static void renderer_edited(GtkCellRendererText * cell,
update_tree(&rootmenu, NULL);
+free: gtk_tree_path_free(path); }
@@ -974,13 +975,14 @@ on_treeview2_key_press_event(GtkWidget * widget, void on_treeview2_cursor_changed(GtkTreeView * treeview, gpointer user_data) { + GtkTreeModel *model = gtk_tree_view_get_model(treeview); GtkTreeSelection *selection; GtkTreeIter iter; struct menu *menu;
selection = gtk_tree_view_get_selection(treeview); - if (gtk_tree_selection_get_selected(selection, &model2, &iter)) { - gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1); + if (gtk_tree_selection_get_selected(selection, &model, &iter)) { + gtk_tree_model_get(model, &iter, COL_MENU, &menu, -1); text_insert_help(menu); } } diff --git a/scripts/kconfig/lxdialog/inputbox.c b/scripts/kconfig/lxdialog/inputbox.c index 1dcfb288ee63..327b60cdb8da 100644 --- a/scripts/kconfig/lxdialog/inputbox.c +++ b/scripts/kconfig/lxdialog/inputbox.c @@ -39,8 +39,10 @@ int dialog_inputbox(const char *title, const char *prompt, int height, int width
if (!init) instr[0] = '\0'; - else - strcpy(instr, init); + else { + strncpy(instr, init, sizeof(dialog_input_result) - 1); + instr[sizeof(dialog_input_result) - 1] = '\0'; + }
do_resize: if (getmaxy(stdscr) <= (height - INPUTBOX_HEIGTH_MIN)) diff --git a/scripts/kconfig/lxdialog/menubox.c b/scripts/kconfig/lxdialog/menubox.c index 0e333284e947..6bb8a320a4cb 100644 --- a/scripts/kconfig/lxdialog/menubox.c +++ b/scripts/kconfig/lxdialog/menubox.c @@ -264,7 +264,7 @@ int dialog_menu(const char *title, const char *prompt, if (key < 256 && isalpha(key)) key = tolower(key);
- if (strchr("ynmh", key)) + if (strchr("ynmh ", key)) i = max_choice; else { for (i = choice + 1; i < max_choice; i++) { diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c index 8cd72fe25974..7a17c94a1594 100644 --- a/scripts/kconfig/nconf.c +++ b/scripts/kconfig/nconf.c @@ -591,6 +591,8 @@ static void item_add_str(const char *fmt, ...) tmp_str, sizeof(k_menu_items[index].str));
+ k_menu_items[index].str[sizeof(k_menu_items[index].str) - 1] = '\0'; + free_item(curses_menu_items[index]); curses_menu_items[index] = new_item( k_menu_items[index].str, diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c index 25a7263ef3c8..5f13a0a7fb0b 100644 --- a/scripts/kconfig/nconf.gui.c +++ b/scripts/kconfig/nconf.gui.c @@ -349,6 +349,7 @@ int dialog_inputbox(WINDOW *main_window, x = (columns-win_cols)/2;
strncpy(result, init, *result_len); + result[*result_len - 1] = '\0';
/* create the windows */ win = newwin(win_lines, win_cols, y, x); diff --git a/security/apparmor/file.c b/security/apparmor/file.c index 6fd21324a097..a51b83cf6968 100644 --- a/security/apparmor/file.c +++ b/security/apparmor/file.c @@ -436,9 +436,11 @@ int aa_path_link(const struct cred *subj_cred, { struct path link = { .mnt = new_dir->mnt, .dentry = new_dentry }; struct path target = { .mnt = new_dir->mnt, .dentry = old_dentry }; + struct inode *inode = d_backing_inode(old_dentry); + vfsuid_t vfsuid = i_uid_into_vfsuid(mnt_idmap(target.mnt), inode); struct path_cond cond = { - d_backing_inode(old_dentry)->i_uid, - d_backing_inode(old_dentry)->i_mode + .uid = vfsuid_into_kuid(vfsuid), + .mode = inode->i_mode, }; char *buffer = NULL, *buffer2 = NULL; struct aa_profile *profile; diff --git a/security/apparmor/include/lib.h b/security/apparmor/include/lib.h index 73c8a32c6861..6e88e99da80f 100644 --- a/security/apparmor/include/lib.h +++ b/security/apparmor/include/lib.h @@ -46,7 +46,11 @@ #define AA_BUG_FMT(X, fmt, args...) \ WARN((X), "AppArmor WARN %s: (" #X "): " fmt, __func__, ##args) #else -#define AA_BUG_FMT(X, fmt, args...) no_printk(fmt, ##args) +#define AA_BUG_FMT(X, fmt, args...) \ + do { \ + BUILD_BUG_ON_INVALID(X); \ + no_printk(fmt, ##args); \ + } while (0) #endif
#define AA_ERROR(fmt, args...) \ diff --git a/security/inode.c b/security/inode.c index 3aa75fffa8c9..a90b043695d9 100644 --- a/security/inode.c +++ b/security/inode.c @@ -159,7 +159,6 @@ static struct dentry *securityfs_create_dentry(const char *name, umode_t mode, inode->i_fop = fops; } d_instantiate(dentry, inode); - dget(dentry); inode_unlock(dir); return dentry;
@@ -306,7 +305,6 @@ void securityfs_remove(struct dentry *dentry) simple_rmdir(dir, dentry); else simple_unlink(dir, dentry); - dput(dentry); } inode_unlock(dir); simple_release_fs(&mount, &mount_count); diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 31fc20350fd9..f37fd1e48740 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -24,6 +24,7 @@ #include <sound/minors.h> #include <linux/uio.h> #include <linux/delay.h> +#include <linux/bitops.h>
#include "pcm_local.h"
@@ -3125,13 +3126,23 @@ struct snd_pcm_sync_ptr32 { static snd_pcm_uframes_t recalculate_boundary(struct snd_pcm_runtime *runtime) { snd_pcm_uframes_t boundary; + snd_pcm_uframes_t border; + int order;
if (! runtime->buffer_size) return 0; - boundary = runtime->buffer_size; - while (boundary * 2 <= 0x7fffffffUL - runtime->buffer_size) - boundary *= 2; - return boundary; + + border = 0x7fffffffUL - runtime->buffer_size; + if (runtime->buffer_size > border) + return runtime->buffer_size; + + order = __fls(border) - __fls(runtime->buffer_size); + boundary = runtime->buffer_size << order; + + if (boundary <= border) + return boundary; + else + return boundary / 2; }
static int snd_pcm_ioctl_sync_ptr_compat(struct snd_pcm_substream *substream, diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c index bbf7bcdb449a..0a9223c18d77 100644 --- a/sound/hda/hdac_device.c +++ b/sound/hda/hdac_device.c @@ -611,7 +611,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_power_up_pm); int snd_hdac_keep_power_up(struct hdac_device *codec) { if (!atomic_inc_not_zero(&codec->in_pm)) { - int ret = pm_runtime_get_if_active(&codec->dev, true); + int ret = pm_runtime_get_if_active(&codec->dev); if (!ret) return -1; if (ret < 0) diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index aa6dc00985b5..80c3084189b0 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -641,24 +641,16 @@ static void hda_jackpoll_work(struct work_struct *work) struct hda_codec *codec = container_of(work, struct hda_codec, jackpoll_work.work);
- /* for non-polling trigger: we need nothing if already powered on */ - if (!codec->jackpoll_interval && snd_hdac_is_power_on(&codec->core)) + if (!codec->jackpoll_interval) return;
/* the power-up/down sequence triggers the runtime resume */ - snd_hda_power_up_pm(codec); + snd_hda_power_up(codec); /* update jacks manually if polling is required, too */ - if (codec->jackpoll_interval) { - snd_hda_jack_set_dirty_all(codec); - snd_hda_jack_poll_all(codec); - } - snd_hda_power_down_pm(codec); - - if (!codec->jackpoll_interval) - return; - - schedule_delayed_work(&codec->jackpoll_work, - codec->jackpoll_interval); + snd_hda_jack_set_dirty_all(codec); + snd_hda_jack_poll_all(codec); + schedule_delayed_work(&codec->jackpoll_work, codec->jackpoll_interval); + snd_hda_power_down(codec); }
/* release all pincfg lists */ @@ -2920,12 +2912,12 @@ static void hda_call_codec_resume(struct hda_codec *codec) snd_hda_regmap_sync(codec); }
- if (codec->jackpoll_interval) - hda_jackpoll_work(&codec->jackpoll_work.work); - else - snd_hda_jack_report_sync(codec); + snd_hda_jack_report_sync(codec); codec->core.dev.power.power_state = PMSG_ON; snd_hdac_leave_pm(&codec->core); + if (codec->jackpoll_interval) + schedule_delayed_work(&codec->jackpoll_work, + codec->jackpoll_interval); }
static int hda_codec_runtime_suspend(struct device *dev) @@ -2937,8 +2929,6 @@ static int hda_codec_runtime_suspend(struct device *dev) if (!codec->card) return 0;
- cancel_delayed_work_sync(&codec->jackpoll_work); - state = hda_call_codec_suspend(codec); if (codec->link_down_at_suspend || (codec_has_clkstop(codec) && codec_has_epss(codec) && @@ -2946,10 +2936,6 @@ static int hda_codec_runtime_suspend(struct device *dev) snd_hdac_codec_link_down(&codec->core); snd_hda_codec_display_power(codec, false);
- if (codec->bus->jackpoll_in_suspend && - (dev->power.power_state.event != PM_EVENT_SUSPEND)) - schedule_delayed_work(&codec->jackpoll_work, - codec->jackpoll_interval); return 0; }
@@ -3052,6 +3038,7 @@ void snd_hda_codec_shutdown(struct hda_codec *codec) if (!codec->core.registered) return;
+ codec->jackpoll_interval = 0; /* don't poll any longer */ cancel_delayed_work_sync(&codec->jackpoll_work); list_for_each_entry(cpcm, &codec->pcm_list_head, list) snd_pcm_suspend_all(cpcm->pcm); @@ -3118,10 +3105,11 @@ int snd_hda_codec_build_controls(struct hda_codec *codec) if (err < 0) return err;
+ snd_hda_jack_report_sync(codec); /* call at the last init point */ if (codec->jackpoll_interval) - hda_jackpoll_work(&codec->jackpoll_work.work); - else - snd_hda_jack_report_sync(codec); /* call at the last init point */ + schedule_delayed_work(&codec->jackpoll_work, + codec->jackpoll_interval); + sync_power_up_states(codec); return 0; } diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c index 94b452595f30..851e9231bbbd 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c @@ -4411,7 +4411,7 @@ static int add_tuning_control(struct hda_codec *codec, } knew.private_value = HDA_COMPOSE_AMP_VAL(nid, 1, 0, type); - sprintf(namestr, "%s %s Volume", name, dirstr[dir]); + snprintf(namestr, sizeof(namestr), "%s %s Volume", name, dirstr[dir]); return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec)); }
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index e12e3134b5e1..d4bc80780a1f 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -10071,6 +10071,8 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3), SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360), SND_PCI_QUIRK(0x103c, 0x8537, "HP ProBook 440 G6", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), + SND_PCI_QUIRK(0x103c, 0x8548, "HP EliteBook x360 830 G6", ALC285_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x854a, "HP EliteBook 830 G6", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x85c6, "HP Pavilion x360 Convertible 14-dy1xxx", ALC295_FIXUP_HP_MUTE_LED_COEFBIT11), SND_PCI_QUIRK(0x103c, 0x85de, "HP Envy x360 13-ar0xxx", ALC285_FIXUP_HP_ENVY_X360), SND_PCI_QUIRK(0x103c, 0x860f, "HP ZBook 15 G6", ALC285_FIXUP_HP_GPIO_AMP_INIT), @@ -10636,6 +10638,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC), + SND_PCI_QUIRK(0x1ee7, 0x2078, "HONOR BRB-X M1010", ALC2XX_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1f66, 0x0105, "Ayaneo Portable Game Player", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x2014, 0x800a, "Positivo ARN50", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x2782, 0x0214, "VAIO VJFE-CL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), @@ -10651,6 +10654,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0xf111, 0x0006, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0xf111, 0x0009, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0xf111, 0x000b, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0xf111, 0x000c, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
#if 0 diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c index ae285c0a629c..f3df6fe2b7f1 100644 --- a/sound/pci/intel8x0.c +++ b/sound/pci/intel8x0.c @@ -2252,7 +2252,7 @@ static int snd_intel8x0_mixer(struct intel8x0 *chip, int ac97_clock, tmp |= chip->ac97_sdin[0] << ICH_DI1L_SHIFT; for (i = 1; i < 4; i++) { if (pcm->r[0].codec[i]) { - tmp |= chip->ac97_sdin[pcm->r[0].codec[1]->num] << ICH_DI2L_SHIFT; + tmp |= chip->ac97_sdin[pcm->r[0].codec[i]->num] << ICH_DI2L_SHIFT; break; } } diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c index 8b6b76029694..0ddfb0cb376f 100644 --- a/sound/soc/codecs/hdac_hdmi.c +++ b/sound/soc/codecs/hdac_hdmi.c @@ -1230,7 +1230,8 @@ static int hdac_hdmi_parse_eld(struct hdac_device *hdev, >> DRM_ELD_VER_SHIFT;
if (ver != ELD_VER_CEA_861D && ver != ELD_VER_PARTIAL) { - dev_err(&hdev->dev, "HDMI: Unknown ELD version %d\n", ver); + dev_err_ratelimited(&hdev->dev, + "HDMI: Unknown ELD version %d\n", ver); return -EINVAL; }
@@ -1238,7 +1239,8 @@ static int hdac_hdmi_parse_eld(struct hdac_device *hdev, DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT;
if (mnl > ELD_MAX_MNL) { - dev_err(&hdev->dev, "HDMI: MNL Invalid %d\n", mnl); + dev_err_ratelimited(&hdev->dev, + "HDMI: MNL Invalid %d\n", mnl); return -EINVAL; }
@@ -1297,8 +1299,8 @@ static void hdac_hdmi_present_sense(struct hdac_hdmi_pin *pin,
if (!port->eld.monitor_present || !port->eld.eld_valid) {
- dev_err(&hdev->dev, "%s: disconnect for pin:port %d:%d\n", - __func__, pin->nid, port->id); + dev_dbg(&hdev->dev, "%s: disconnect for pin:port %d:%d\n", + __func__, pin->nid, port->id);
/* * PCMs are not registered during device probe, so don't diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c index 1955d77cffd9..0f250e8e216a 100644 --- a/sound/soc/codecs/rt5640.c +++ b/sound/soc/codecs/rt5640.c @@ -3016,6 +3016,11 @@ static int rt5640_i2c_probe(struct i2c_client *i2c) }
regmap_read(rt5640->regmap, RT5640_VENDOR_ID2, &val); + if (val != RT5640_DEVICE_ID) { + usleep_range(60000, 100000); + regmap_read(rt5640->regmap, RT5640_VENDOR_ID2, &val); + } + if (val != RT5640_DEVICE_ID) { dev_err(&i2c->dev, "Device with ID register %#x is not rt5640/39\n", val); diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c index 886f5c29939b..a6948a57636a 100644 --- a/sound/soc/fsl/fsl_sai.c +++ b/sound/soc/fsl/fsl_sai.c @@ -768,9 +768,9 @@ static void fsl_sai_config_disable(struct fsl_sai *sai, int dir) * are running concurrently. */ /* Software Reset */ - regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), FSL_SAI_CSR_SR); + regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs), FSL_SAI_CSR_SR, FSL_SAI_CSR_SR); /* Clear SR bit to finish the reset */ - regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), 0); + regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs), FSL_SAI_CSR_SR, 0); }
static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd, @@ -889,11 +889,11 @@ static int fsl_sai_dai_probe(struct snd_soc_dai *cpu_dai) unsigned int ofs = sai->soc_data->reg_offset;
/* Software Reset for both Tx and Rx */ - regmap_write(sai->regmap, FSL_SAI_TCSR(ofs), FSL_SAI_CSR_SR); - regmap_write(sai->regmap, FSL_SAI_RCSR(ofs), FSL_SAI_CSR_SR); + regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs), FSL_SAI_CSR_SR, FSL_SAI_CSR_SR); + regmap_update_bits(sai->regmap, FSL_SAI_RCSR(ofs), FSL_SAI_CSR_SR, FSL_SAI_CSR_SR); /* Clear SR bit to finish the reset */ - regmap_write(sai->regmap, FSL_SAI_TCSR(ofs), 0); - regmap_write(sai->regmap, FSL_SAI_RCSR(ofs), 0); + regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs), FSL_SAI_CSR_SR, 0); + regmap_update_bits(sai->regmap, FSL_SAI_RCSR(ofs), FSL_SAI_CSR_SR, 0);
regmap_update_bits(sai->regmap, FSL_SAI_TCR1(ofs), FSL_SAI_CR1_RFW_MASK(sai->soc_data->fifo_depth), @@ -1710,11 +1710,11 @@ static int fsl_sai_runtime_resume(struct device *dev)
regcache_cache_only(sai->regmap, false); regcache_mark_dirty(sai->regmap); - regmap_write(sai->regmap, FSL_SAI_TCSR(ofs), FSL_SAI_CSR_SR); - regmap_write(sai->regmap, FSL_SAI_RCSR(ofs), FSL_SAI_CSR_SR); + regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs), FSL_SAI_CSR_SR, FSL_SAI_CSR_SR); + regmap_update_bits(sai->regmap, FSL_SAI_RCSR(ofs), FSL_SAI_CSR_SR, FSL_SAI_CSR_SR); usleep_range(1000, 2000); - regmap_write(sai->regmap, FSL_SAI_TCSR(ofs), 0); - regmap_write(sai->regmap, FSL_SAI_RCSR(ofs), 0); + regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs), FSL_SAI_CSR_SR, 0); + regmap_update_bits(sai->regmap, FSL_SAI_RCSR(ofs), FSL_SAI_CSR_SR, 0);
ret = regcache_sync(sai->regmap); if (ret) diff --git a/sound/soc/intel/avs/core.c b/sound/soc/intel/avs/core.c index 63e4356e8caf..8f36cef88fe6 100644 --- a/sound/soc/intel/avs/core.c +++ b/sound/soc/intel/avs/core.c @@ -415,6 +415,8 @@ static int avs_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL); if (!adev) return -ENOMEM; + bus = &adev->base.core; + ret = avs_bus_init(adev, pci, id); if (ret < 0) { dev_err(dev, "failed to init avs bus: %d\n", ret); @@ -425,7 +427,6 @@ static int avs_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) if (ret < 0) return ret;
- bus = &adev->base.core; bus->addr = pci_resource_start(pci, 0); bus->remap_addr = pci_ioremap_bar(pci, 0); if (!bus->remap_addr) { diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c index f918d9e16dc0..f342bc4b3a14 100644 --- a/sound/soc/qcom/lpass-platform.c +++ b/sound/soc/qcom/lpass-platform.c @@ -201,7 +201,6 @@ static int lpass_platform_pcmops_open(struct snd_soc_component *component, struct regmap *map; unsigned int dai_id = cpu_dai->driver->id;
- component->id = dai_id; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; @@ -1189,13 +1188,14 @@ static int lpass_platform_pcmops_suspend(struct snd_soc_component *component) { struct lpass_data *drvdata = snd_soc_component_get_drvdata(component); struct regmap *map; - unsigned int dai_id = component->id;
- if (dai_id == LPASS_DP_RX) + if (drvdata->hdmi_port_enable) { map = drvdata->hdmiif_map; - else - map = drvdata->lpaif_map; + regcache_cache_only(map, true); + regcache_mark_dirty(map); + }
+ map = drvdata->lpaif_map; regcache_cache_only(map, true); regcache_mark_dirty(map);
@@ -1206,14 +1206,19 @@ static int lpass_platform_pcmops_resume(struct snd_soc_component *component) { struct lpass_data *drvdata = snd_soc_component_get_drvdata(component); struct regmap *map; - unsigned int dai_id = component->id; + int ret;
- if (dai_id == LPASS_DP_RX) + if (drvdata->hdmi_port_enable) { map = drvdata->hdmiif_map; - else - map = drvdata->lpaif_map; + regcache_cache_only(map, false); + ret = regcache_sync(map); + if (ret) + return ret; + }
+ map = drvdata->lpaif_map; regcache_cache_only(map, false); + return regcache_sync(map); }
@@ -1223,7 +1228,9 @@ static int lpass_platform_copy(struct snd_soc_component *component,
 			       unsigned long bytes)
 {
 	struct snd_pcm_runtime *rt = substream->runtime;
-	unsigned int dai_id = component->id;
+	struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
+	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
+	unsigned int dai_id = cpu_dai->driver->id;
 	int ret = 0;
 	void __iomem *dma_buf = (void __iomem *) (rt->dma_area + pos +
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 7eea70eea68b..dc95b6f41555 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1025,6 +1025,9 @@ static int soc_dai_link_sanity_check(struct snd_soc_card *card,
 void snd_soc_remove_pcm_runtime(struct snd_soc_card *card,
 				struct snd_soc_pcm_runtime *rtd)
 {
+	if (!rtd)
+		return;
+
 	lockdep_assert_held(&client_mutex);
 	/*
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 7729f8f4d5e6..7facb7b2dba1 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -734,6 +734,10 @@ static int snd_soc_dapm_set_bias_level(struct snd_soc_dapm_context *dapm,
 out:
 	trace_snd_soc_bias_level_done(card, level);
+	/* success */
+	if (ret == 0)
+		snd_soc_dapm_init_bias_level(dapm, level);
+
 	return ret;
 }
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index be0b3c8ac705..f2cce15be4e2 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -2150,15 +2150,15 @@ static int dell_dock_mixer_init(struct usb_mixer_interface *mixer)
 #define SND_RME_CLK_FREQMUL_SHIFT 18
 #define SND_RME_CLK_FREQMUL_MASK 0x7
 #define SND_RME_CLK_SYSTEM(x) \
-	((x >> SND_RME_CLK_SYSTEM_SHIFT) & SND_RME_CLK_SYSTEM_MASK)
+	(((x) >> SND_RME_CLK_SYSTEM_SHIFT) & SND_RME_CLK_SYSTEM_MASK)
 #define SND_RME_CLK_AES(x) \
-	((x >> SND_RME_CLK_AES_SHIFT) & SND_RME_CLK_AES_SPDIF_MASK)
+	(((x) >> SND_RME_CLK_AES_SHIFT) & SND_RME_CLK_AES_SPDIF_MASK)
 #define SND_RME_CLK_SPDIF(x) \
-	((x >> SND_RME_CLK_SPDIF_SHIFT) & SND_RME_CLK_AES_SPDIF_MASK)
+	(((x) >> SND_RME_CLK_SPDIF_SHIFT) & SND_RME_CLK_AES_SPDIF_MASK)
 #define SND_RME_CLK_SYNC(x) \
-	((x >> SND_RME_CLK_SYNC_SHIFT) & SND_RME_CLK_SYNC_MASK)
+	(((x) >> SND_RME_CLK_SYNC_SHIFT) & SND_RME_CLK_SYNC_MASK)
 #define SND_RME_CLK_FREQMUL(x) \
-	((x >> SND_RME_CLK_FREQMUL_SHIFT) & SND_RME_CLK_FREQMUL_MASK)
+	(((x) >> SND_RME_CLK_FREQMUL_SHIFT) & SND_RME_CLK_FREQMUL_MASK)
 #define SND_RME_CLK_AES_LOCK 0x1
 #define SND_RME_CLK_AES_SYNC 0x4
 #define SND_RME_CLK_SPDIF_LOCK 0x2
@@ -2167,9 +2167,9 @@ static int dell_dock_mixer_init(struct usb_mixer_interface *mixer)
 #define SND_RME_SPDIF_FORMAT_SHIFT 5
 #define SND_RME_BINARY_MASK 0x1
 #define SND_RME_SPDIF_IF(x) \
-	((x >> SND_RME_SPDIF_IF_SHIFT) & SND_RME_BINARY_MASK)
+	(((x) >> SND_RME_SPDIF_IF_SHIFT) & SND_RME_BINARY_MASK)
 #define SND_RME_SPDIF_FORMAT(x) \
-	((x >> SND_RME_SPDIF_FORMAT_SHIFT) & SND_RME_BINARY_MASK)
+	(((x) >> SND_RME_SPDIF_FORMAT_SHIFT) & SND_RME_BINARY_MASK)
 static const u32 snd_rme_rate_table[] = {
 	32000, 44100, 48000, 50000,
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index 0f1558ef8555..12a5e053ec54 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -341,20 +341,28 @@ snd_pcm_chmap_elem *convert_chmap_v3(struct uac3_cluster_header_descriptor
 	len = le16_to_cpu(cluster->wLength);
 	c = 0;
-	p += sizeof(struct uac3_cluster_header_descriptor);
+	p += sizeof(*cluster);
+	len -= sizeof(*cluster);
-	while (((p - (void *)cluster) < len) && (c < channels)) {
+	while (len > 0 && (c < channels)) {
 		struct uac3_cluster_segment_descriptor *cs_desc = p;
 		u16 cs_len;
 		u8 cs_type;
+		if (len < sizeof(*cs_desc))
+			break;
 		cs_len = le16_to_cpu(cs_desc->wLength);
+		if (len < cs_len)
+			break;
 		cs_type = cs_desc->bSegmentType;
 		if (cs_type == UAC3_CHANNEL_INFORMATION) {
 			struct uac3_cluster_information_segment_descriptor *is = p;
 			unsigned char map;
+			if (cs_len < sizeof(*is))
+				break;
+
 			/*
 			 * TODO: this conversion is not complete, update it
 			 * after adding UAC3 values to asound.h
@@ -456,6 +464,7 @@ snd_pcm_chmap_elem *convert_chmap_v3(struct uac3_cluster_header_descriptor
 			chmap->map[c++] = map;
 		}
 		p += cs_len;
+		len -= cs_len;
 	}
 	if (channels < c)
@@ -876,7 +885,7 @@ snd_usb_get_audioformat_uac3(struct snd_usb_audio *chip,
 	u64 badd_formats = 0;
 	unsigned int num_channels;
 	struct audioformat *fp;
-	u16 cluster_id, wLength;
+	u16 cluster_id, wLength, cluster_wLength;
 	int clock = 0;
 	int err;
@@ -1005,6 +1014,16 @@ snd_usb_get_audioformat_uac3(struct snd_usb_audio *chip,
 		return ERR_PTR(-EIO);
 	}
+	cluster_wLength = le16_to_cpu(cluster->wLength);
+	if (cluster_wLength < sizeof(*cluster) ||
+	    cluster_wLength > wLength) {
+		dev_err(&dev->dev,
+			"%u:%d : invalid Cluster Descriptor size\n",
+			iface_no, altno);
+		kfree(cluster);
+		return ERR_PTR(-EIO);
+	}
+
 	num_channels = cluster->bNrChannels;
 	chmap = convert_chmap_v3(cluster);
 	kfree(cluster);
diff --git a/sound/usb/validate.c b/sound/usb/validate.c
index 6fe206f6e911..a0d55b77c994 100644
--- a/sound/usb/validate.c
+++ b/sound/usb/validate.c
@@ -221,6 +221,17 @@ static bool validate_uac3_feature_unit(const void *p,
 	return d->bLength >= sizeof(*d) + 4 + 2;
 }
+static bool validate_uac3_power_domain_unit(const void *p,
+					    const struct usb_desc_validator *v)
+{
+	const struct uac3_power_domain_descriptor *d = p;
+
+	if (d->bLength < sizeof(*d))
+		return false;
+	/* baEntities[] + wPDomainDescrStr */
+	return d->bLength >= sizeof(*d) + d->bNrEntities + 2;
+}
+
 static bool validate_midi_out_jack(const void *p,
 				   const struct usb_desc_validator *v)
 {
@@ -274,7 +285,7 @@ static const struct usb_desc_validator audio_validators[] = {
 	/* UAC_VERSION_3, UAC3_EXTENDED_TERMINAL: not implemented yet */
 	FUNC(UAC_VERSION_3, UAC3_MIXER_UNIT, validate_mixer_unit),
 	FUNC(UAC_VERSION_3, UAC3_SELECTOR_UNIT, validate_selector_unit),
-	FUNC(UAC_VERSION_3, UAC_FEATURE_UNIT, validate_uac3_feature_unit),
+	FUNC(UAC_VERSION_3, UAC3_FEATURE_UNIT, validate_uac3_feature_unit),
 	/* UAC_VERSION_3, UAC3_EFFECT_UNIT: not implemented yet */
 	FUNC(UAC_VERSION_3, UAC3_PROCESSING_UNIT, validate_processing_unit),
 	FUNC(UAC_VERSION_3, UAC3_EXTENSION_UNIT, validate_processing_unit),
@@ -285,6 +296,7 @@ static const struct usb_desc_validator audio_validators[] = {
 	     struct uac3_clock_multiplier_descriptor),
 	/* UAC_VERSION_3, UAC3_SAMPLE_RATE_CONVERTER: not implemented yet */
 	/* UAC_VERSION_3, UAC3_CONNECTORS: not implemented yet */
+	FUNC(UAC_VERSION_3, UAC3_POWER_DOMAIN, validate_uac3_power_domain_unit),
 	{ } /* terminator */
 };
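The sound/usb/stream.c and sound/usb/validate.c hunks above all follow the same rule: never trust a device-supplied descriptor length beyond the bytes actually remaining in the buffer. A minimal stand-alone sketch of that pattern (hypothetical desc_hdr/walk_descriptors names, not from the patch):

#include <stddef.h>
#include <stdint.h>

struct desc_hdr {
        uint8_t bLength;
        uint8_t bDescriptorType;
};

static void walk_descriptors(const uint8_t *p, size_t len)
{
        while (len >= sizeof(struct desc_hdr)) {
                const struct desc_hdr *d = (const void *)p;

                /* Reject a length shorter than the header or longer than what is left. */
                if (d->bLength < sizeof(*d) || d->bLength > len)
                        break;
                /* ... parse the descriptor body here ... */
                p += d->bLength;
                len -= d->bLength;
        }
}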
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index 08d0ac543c67..a0536528dfde 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -534,9 +534,9 @@ int main(int argc, char **argv)
 		usage();
 	if (version_requested)
-		return do_version(argc, argv);
-
-	ret = cmd_select(commands, argc, argv, do_help);
+		ret = do_version(argc, argv);
+	else
+		ret = cmd_select(commands, argc, argv, do_help);
 	if (json_output)
 		jsonw_destroy(&json_wtr);
diff --git a/tools/include/nolibc/std.h b/tools/include/nolibc/std.h
index 933bc0be7e1c..a9d8b5b51f37 100644
--- a/tools/include/nolibc/std.h
+++ b/tools/include/nolibc/std.h
@@ -20,6 +20,8 @@
#include "stdint.h"
+#include <linux/types.h> + /* those are commonly provided by sys/types.h */ typedef unsigned int dev_t; typedef unsigned long ino_t; @@ -31,6 +33,6 @@ typedef unsigned long nlink_t; typedef signed long off_t; typedef signed long blksize_t; typedef signed long blkcnt_t; -typedef signed long time_t; +typedef __kernel_old_time_t time_t;
#endif /* _NOLIBC_STD_H */ diff --git a/tools/include/nolibc/types.h b/tools/include/nolibc/types.h index 8cfc4c860fa4..48dca7b188d0 100644 --- a/tools/include/nolibc/types.h +++ b/tools/include/nolibc/types.h @@ -128,7 +128,7 @@ typedef struct { int __fd = (fd); \ if (__fd >= 0) \ __set->fds[__fd / FD_SETIDXMASK] &= \ - ~(1U << (__fd & FX_SETBITMASK)); \ + ~(1U << (__fd & FD_SETBITMASK)); \ } while (0)
#define FD_SET(fd, set) do { \ @@ -145,7 +145,7 @@ typedef struct { int __r = 0; \ if (__fd >= 0) \ __r = !!(__set->fds[__fd / FD_SETIDXMASK] & \ -1U << (__fd & FD_SET_BITMASK)); \ +1U << (__fd & FD_SETBITMASK)); \ __r; \ })
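As a side note on the FD_SETBITMASK spelling fixes above, these macros split a file descriptor into a word index and a bit position. A tiny illustration with assumed 32-bit words (the constants here are hypothetical, not nolibc's actual definitions):

#include <stdio.h>

#define WORD_BITS 32u              /* bits per fds[] word (assumed) */
#define BIT_MASK  (WORD_BITS - 1)  /* plays the FD_SETBITMASK role  */

int main(void)
{
        unsigned int fd = 42;

        /* fd 42 lands in word 1, bit 10 */
        printf("fd %u -> word %u, bit %u\n", fd, fd / WORD_BITS, fd & BIT_MASK);
        return 0;
}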
diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
index 39e659c83cfd..cb8b0a3029d3 100644
--- a/tools/include/uapi/linux/if_link.h
+++ b/tools/include/uapi/linux/if_link.h
@@ -865,6 +865,7 @@ enum {
 	IFLA_BOND_AD_LACP_ACTIVE,
 	IFLA_BOND_MISSED_MAX,
 	IFLA_BOND_NS_IP6_TARGET,
+	IFLA_BOND_COUPLED_CONTROL,
 	__IFLA_BOND_MAX,
 };
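The IFLA_BOND_COUPLED_CONTROL addition above lands immediately before the __IFLA_BOND_MAX sentinel, which is how UAPI netlink attribute enums stay ABI-stable: existing values keep their numbers and only the sentinel grows. A sketch with a hypothetical enum (not the bonding UAPI):

enum {
        MY_ATTR_UNSPEC,
        MY_ATTR_FOO,
        MY_ATTR_BAR,
        MY_ATTR_NEW,        /* new entries are appended here, never in the middle */
        __MY_ATTR_MAX,
};
#define MY_ATTR_MAX (__MY_ATTR_MAX - 1)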
diff --git a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
index 08a399b0be28..6ab9139f16af 100644
--- a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
+++ b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
@@ -240,9 +240,9 @@ static int mperf_stop(void)
 	int cpu;
 	for (cpu = 0; cpu < cpu_count; cpu++) {
-		mperf_measure_stats(cpu);
-		mperf_get_tsc(&tsc_at_measure_end[cpu]);
 		clock_gettime(CLOCK_REALTIME, &time_end[cpu]);
+		mperf_get_tsc(&tsc_at_measure_end[cpu]);
+		mperf_measure_stats(cpu);
 	}
 	return 0;
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index ff527ac065cf..c006e72b4f43 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -98,7 +98,9 @@ else ifneq ($(CROSS_COMPILE),)
 # Allow userspace to override CLANG_CROSS_FLAGS to specify their own
 # sysroots and flags or to avoid the GCC call in pure Clang builds.
 ifeq ($(CLANG_CROSS_FLAGS),)
-CLANG_CROSS_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%))
+CLANG_TARGET := $(notdir $(CROSS_COMPILE:%-=%))
+CLANG_TARGET := $(subst s390-linux,s390x-linux,$(CLANG_TARGET))
+CLANG_CROSS_FLAGS := --target=$(CLANG_TARGET)
 GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)gcc 2>/dev/null))
 ifneq ($(GCC_TOOLCHAIN_DIR),)
 CLANG_CROSS_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE))
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index 331601575743..a8979280b505 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -1358,7 +1358,10 @@ sub __eval_option {
 	    # If a variable contains itself, use the default var
 	    if (($var eq $name) && defined($opt{$var})) {
 		$o = $opt{$var};
-		$retval = "$retval$o";
+		# Only append if the default doesn't contain itself
+		if ($o !~ m/${$var}/) {
+		    $retval = "$retval$o";
+		}
 	    } elsif (defined($opt{$o})) {
 		$o = $opt{$o};
 		$retval = "$retval$o";
diff --git a/tools/testing/selftests/arm64/fp/sve-ptrace.c b/tools/testing/selftests/arm64/fp/sve-ptrace.c
index c6228176dd1a..408fb1c5c2f8 100644
--- a/tools/testing/selftests/arm64/fp/sve-ptrace.c
+++ b/tools/testing/selftests/arm64/fp/sve-ptrace.c
@@ -168,7 +168,7 @@ static void ptrace_set_get_inherit(pid_t child, const struct vec_type *type)
 	memset(&sve, 0, sizeof(sve));
 	sve.size = sizeof(sve);
 	sve.vl = sve_vl_from_vq(SVE_VQ_MIN);
-	sve.flags = SVE_PT_VL_INHERIT;
+	sve.flags = SVE_PT_VL_INHERIT | SVE_PT_REGS_SVE;
 	ret = set_sve(child, type, &sve);
 	if (ret != 0) {
 		ksft_test_result_fail("Failed to set %s SVE_PT_VL_INHERIT\n",
@@ -233,6 +233,7 @@ static void ptrace_set_get_vl(pid_t child, const struct vec_type *type,
 	/* Set the VL by doing a set with no register payload */
 	memset(&sve, 0, sizeof(sve));
 	sve.size = sizeof(sve);
+	sve.flags = SVE_PT_REGS_SVE;
 	sve.vl = vl;
 	ret = set_sve(child, type, &sve);
 	if (ret != 0) {
diff --git a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c
index dfff6feac12c..7e9a508c1571 100644
--- a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c
+++ b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c
@@ -21,8 +21,7 @@
 #include "../progs/test_user_ringbuf.h"
 static const long c_sample_size = sizeof(struct sample) + BPF_RINGBUF_HDR_SZ;
-static const long c_ringbuf_size = 1 << 12; /* 1 small page */
-static const long c_max_entries = c_ringbuf_size / c_sample_size;
+static long c_ringbuf_size, c_max_entries;
 static void drain_current_samples(void)
 {
@@ -424,7 +423,9 @@ static void test_user_ringbuf_loop(void)
 	uint32_t remaining_samples = total_samples;
 	int err;
-	BUILD_BUG_ON(total_samples <= c_max_entries);
+	if (!ASSERT_LT(c_max_entries, total_samples, "compare_c_max_entries"))
+		return;
+
 	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
 	if (err)
 		return;
@@ -686,6 +687,9 @@ void test_user_ringbuf(void)
 {
 	int i;
+	c_ringbuf_size = getpagesize(); /* 1 page */
+	c_max_entries = c_ringbuf_size / c_sample_size;
+
 	for (i = 0; i < ARRAY_SIZE(success_tests); i++) {
 		if (!test__start_subtest(success_tests[i].test_name))
 			continue;
diff --git a/tools/testing/selftests/bpf/progs/verifier_unpriv.c b/tools/testing/selftests/bpf/progs/verifier_unpriv.c
index 7ea535bfbacd..e4ef82a6ee38 100644
--- a/tools/testing/selftests/bpf/progs/verifier_unpriv.c
+++ b/tools/testing/selftests/bpf/progs/verifier_unpriv.c
@@ -619,7 +619,7 @@ __naked void pass_pointer_to_tail_call(void)
 SEC("socket")
 __description("unpriv: cmp map pointer with zero")
-__success __failure_unpriv __msg_unpriv("R1 pointer comparison")
+__success __success_unpriv __retval(0)
 __naked void cmp_map_pointer_with_zero(void)
 {
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
index 4b994b6df5ac..ed81eaf2afd6 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
@@ -29,7 +29,7 @@ ftrace_filter_check 'schedule*' '^schedule.*$'
 ftrace_filter_check '*pin*lock' '.*pin.*lock$'
 # filter by start*mid*
-ftrace_filter_check 'mutex*try*' '^mutex.*try.*'
+ftrace_filter_check 'mutex*unl*' '^mutex.*unl.*'
 # Advanced full-glob matching feature is recently supported.
 # Skip the tests if we are sure the kernel does not support it.
diff --git a/tools/testing/selftests/futex/include/futextest.h b/tools/testing/selftests/futex/include/futextest.h
index ddbcfc9b7bac..7a5fd1d5355e 100644
--- a/tools/testing/selftests/futex/include/futextest.h
+++ b/tools/testing/selftests/futex/include/futextest.h
@@ -47,6 +47,17 @@ typedef volatile u_int32_t futex_t;
 			 FUTEX_PRIVATE_FLAG)
 #endif
+/*
+ * SYS_futex is expected from system C library, in glibc some 32-bit
+ * architectures (e.g. RV32) are using 64-bit time_t, therefore it doesn't have
+ * SYS_futex defined but just SYS_futex_time64. Define SYS_futex as
+ * SYS_futex_time64 in this situation to ensure the compilation and the
+ * compatibility.
+ */
+#if !defined(SYS_futex) && defined(SYS_futex_time64)
+#define SYS_futex SYS_futex_time64
+#endif
+
 /**
  * futex() - SYS_futex syscall wrapper
  * @uaddr:	address of first futex
diff --git a/tools/testing/selftests/memfd/memfd_test.c b/tools/testing/selftests/memfd/memfd_test.c
index e92b60eecb7d..9c9c82fd18a7 100644
--- a/tools/testing/selftests/memfd/memfd_test.c
+++ b/tools/testing/selftests/memfd/memfd_test.c
@@ -285,6 +285,24 @@ static void *mfd_assert_mmap_shared(int fd)
 	return p;
 }
+static void *mfd_assert_mmap_read_shared(int fd)
+{
+	void *p;
+
+	p = mmap(NULL,
+		 mfd_def_size,
+		 PROT_READ,
+		 MAP_SHARED,
+		 fd,
+		 0);
+	if (p == MAP_FAILED) {
+		printf("mmap() failed: %m\n");
+		abort();
+	}
+
+	return p;
+}
+
 static void *mfd_assert_mmap_private(int fd)
 {
 	void *p;
@@ -986,6 +1004,30 @@ static void test_seal_future_write(void)
 	close(fd);
 }
+static void test_seal_write_map_read_shared(void)
+{
+	int fd;
+	void *p;
+
+	printf("%s SEAL-WRITE-MAP-READ\n", memfd_str);
+
+	fd = mfd_assert_new("kern_memfd_seal_write_map_read",
+			    mfd_def_size,
+			    MFD_CLOEXEC | MFD_ALLOW_SEALING);
+
+	mfd_assert_add_seals(fd, F_SEAL_WRITE);
+	mfd_assert_has_seals(fd, F_SEAL_WRITE);
+
+	p = mfd_assert_mmap_read_shared(fd);
+
+	mfd_assert_read(fd);
+	mfd_assert_read_shared(fd);
+	mfd_fail_write(fd);
+
+	munmap(p, mfd_def_size);
+	close(fd);
+}
+
 /*
  * Test SEAL_SHRINK
  * Test whether SEAL_SHRINK actually prevents shrinking
@@ -1603,6 +1645,7 @@ int main(int argc, char **argv)
 	test_seal_write();
 	test_seal_future_write();
+	test_seal_write_map_read_shared();
 	test_seal_shrink();
 	test_seal_grow();
 	test_seal_resize();
diff --git a/tools/testing/selftests/net/mptcp/pm_netlink.sh b/tools/testing/selftests/net/mptcp/pm_netlink.sh
index 71899a3ffa7a..3528e730e4d3 100755
--- a/tools/testing/selftests/net/mptcp/pm_netlink.sh
+++ b/tools/testing/selftests/net/mptcp/pm_netlink.sh
@@ -134,6 +134,7 @@ ip netns exec $ns1 ./pm_nl_ctl limits 1 9 2>/dev/null
 check "ip netns exec $ns1 ./pm_nl_ctl limits" "$default_limits" "subflows above hard limit"
 ip netns exec $ns1 ./pm_nl_ctl limits 8 8
+ip netns exec $ns1 ./pm_nl_ctl flush
 check "ip netns exec $ns1 ./pm_nl_ctl limits" "accept 8 subflows 8" "set limits"